Lines Matching +full:run +full:- +full:time
23 #include <asm/cpu-info.h>
45 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_compute_return_epc()
52 return -EINVAL; in kvm_compute_return_epc()
65 arch->gprs[insn.r_format.rd] = epc + 8; in kvm_compute_return_epc()
68 nextpc = arch->gprs[insn.r_format.rs]; in kvm_compute_return_epc()
71 return -EINVAL; in kvm_compute_return_epc()
84 if ((long)arch->gprs[insn.i_format.rs] < 0) in kvm_compute_return_epc()
93 if ((long)arch->gprs[insn.i_format.rs] >= 0) in kvm_compute_return_epc()
102 arch->gprs[31] = epc + 8; in kvm_compute_return_epc()
103 if ((long)arch->gprs[insn.i_format.rs] < 0) in kvm_compute_return_epc()
112 arch->gprs[31] = epc + 8; in kvm_compute_return_epc()
113 if ((long)arch->gprs[insn.i_format.rs] >= 0) in kvm_compute_return_epc()
123 return -EINVAL; in kvm_compute_return_epc()
135 return -EINVAL; in kvm_compute_return_epc()
141 arch->gprs[31] = instpc + 8; in kvm_compute_return_epc()
154 if (arch->gprs[insn.i_format.rs] == in kvm_compute_return_epc()
155 arch->gprs[insn.i_format.rt]) in kvm_compute_return_epc()
164 if (arch->gprs[insn.i_format.rs] != in kvm_compute_return_epc()
165 arch->gprs[insn.i_format.rt]) in kvm_compute_return_epc()
178 if ((long)arch->gprs[insn.i_format.rs] <= 0) in kvm_compute_return_epc()
191 if ((long)arch->gprs[insn.i_format.rs] > 0) in kvm_compute_return_epc()
201 return -EINVAL; in kvm_compute_return_epc()
210 return -EINVAL; in kvm_compute_return_epc()
216 return -EINVAL; in kvm_compute_return_epc()
222 return -EINVAL; in kvm_compute_return_epc()
233 /* Fall through - Compact branches not supported before R6 */ in kvm_compute_return_epc()
236 return -EINVAL; in kvm_compute_return_epc()
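
These matches come from kvm_compute_return_epc(), which decodes the branch or jump in whose delay slot an exception occurred and works out the next PC: a taken branch targets the delay-slot address plus the sign-extended 16-bit offset shifted left by 2, link variants additionally write epc + 8 (the address after the delay slot) into GPR 31 or rd, and a not-taken branch falls through to epc + 8. A minimal sketch of that arithmetic (the helper name is invented, not a kernel symbol):

#include <stdint.h>

/* Next PC after a conditional branch at `epc`, as in the excerpt. */
static unsigned long branch_next_pc(unsigned long epc, int16_t simm16,
                                    int taken)
{
        if (taken)
                /* Target is relative to the delay slot at epc + 4. */
                return epc + 4 + ((long)simm16 << 2);
        /* Not taken: skip both the branch and its delay slot. */
        return epc + 8;
}
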
248 err = kvm_compute_return_epc(vcpu, vcpu->arch.pc, in update_pc()
249 &vcpu->arch.pc); in update_pc()
253 vcpu->arch.pc += 4; in update_pc()
256 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc); in update_pc()
262 * kvm_get_badinstr() - Get bad instruction encoding.
275 *out = vcpu->arch.host_cp0_badinstr; in kvm_get_badinstr()
279 return -EINVAL; in kvm_get_badinstr()
284 * kvm_get_badinstrp() - Get bad prior instruction encoding.
297 *out = vcpu->arch.host_cp0_badinstrp; in kvm_get_badinstrp()
301 return -EINVAL; in kvm_get_badinstrp()
306 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
315 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_count_disabled()
317 return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) || in kvm_mips_count_disabled()
322 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
324 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
334 delta = now_ns + vcpu->arch.count_dyn_bias; in kvm_mips_ktime_to_count()
336 if (delta >= vcpu->arch.count_period) { in kvm_mips_ktime_to_count()
338 periods = div64_s64(now_ns, vcpu->arch.count_period); in kvm_mips_ktime_to_count()
339 vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period; in kvm_mips_ktime_to_count()
341 delta = now_ns + vcpu->arch.count_dyn_bias; in kvm_mips_ktime_to_count()
354 return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC); in kvm_mips_ktime_to_count()
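
The scaling here is count = delta * count_hz / NSEC_PER_SEC, with the cached dynamic bias folding whole Count periods away so that delta stays below one 2^32-tick period and the 64-bit multiply cannot overflow. A standalone illustration with an assumed 100 MHz count_hz (all values invented for the example):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
        uint64_t count_hz = 100000000;  /* assumed 100 MHz */
        /* Nanoseconds for the 32-bit Count to wrap: ~42.95 s at 100 MHz. */
        uint64_t count_period = (NSEC_PER_SEC << 32) / count_hz;
        uint64_t now_ns = 100000000000ULL;      /* 100 s of uptime */
        /* The dynamic bias removes whole periods, as in the excerpt. */
        int64_t dyn_bias = -(int64_t)((now_ns / count_period) * count_period);
        uint64_t delta = now_ns + dyn_bias;     /* now < count_period */
        uint32_t count = delta * count_hz / NSEC_PER_SEC;

        printf("period=%llu ns delta=%llu ns count=%u\n",
               (unsigned long long)count_period,
               (unsigned long long)delta, count);
        return 0;
}
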
358 * kvm_mips_count_time() - Get effective current time.
363 * count_resume, i.e. the time that the count was disabled.
369 if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) in kvm_mips_count_time()
370 return vcpu->arch.count_resume; in kvm_mips_count_time()
376 * kvm_mips_read_count_running() - Read the current count value as if running.
378 * @now: Kernel time to read CP0_Count at.
380 * Returns the current guest CP0_Count register at time @now and handles if the
387 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_read_count_running()
393 count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now); in kvm_mips_read_count_running()
400 if ((s32)(count - compare) < 0) in kvm_mips_read_count_running()
406 * looking at whether the interval until the hrtimer expiry time is in kvm_mips_read_count_running()
409 expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer); in kvm_mips_read_count_running()
410 threshold = ktime_add_ns(now, vcpu->arch.count_period / 4); in kvm_mips_read_count_running()
416 running = hrtimer_cancel(&vcpu->arch.comparecount_timer); in kvm_mips_read_count_running()
419 kvm_mips_callbacks->queue_timer_int(vcpu); in kvm_mips_read_count_running()
422 * Restart the timer if it was running based on the expiry time in kvm_mips_read_count_running()
427 vcpu->arch.count_period); in kvm_mips_read_count_running()
428 hrtimer_start(&vcpu->arch.comparecount_timer, expires, in kvm_mips_read_count_running()
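
kvm_mips_read_count_running() raises the timer interrupt itself when the freshly computed Count has already passed Compare and the hrtimer expiry is fresh, i.e. within a quarter of the Count period; the cancelled hrtimer is then pushed one full period forward. A condensed restatement of the check, with plain nanosecond timestamps standing in for ktime_t (sketch only):

#include <stdint.h>

/* Should the timer interrupt be queued here?  Names mirror the excerpt. */
static int timer_int_due(uint32_t count, uint32_t compare,
                         int64_t expires_ns, int64_t now_ns,
                         int64_t count_period_ns)
{
        /* Count has not yet reached Compare (mod 2^32): nothing due. */
        if ((int32_t)(count - compare) < 0)
                return 0;
        /* Due only if the hrtimer expiry lies within the next quarter
         * period; otherwise it is a stale, already-handled match. */
        return expires_ns < now_ns + count_period_ns / 4;
}
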
437 * kvm_mips_read_count() - Read the current count value.
447 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_read_count()
457 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
465 * This is useful where the time/CP0_Count is needed in the calculation of the
476 /* stop hrtimer before finding time */ in kvm_mips_freeze_hrtimer()
477 hrtimer_cancel(&vcpu->arch.comparecount_timer); in kvm_mips_freeze_hrtimer()
487 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
505 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_resume_hrtimer()
512 delta = (u64)(u32)(compare - count - 1) + 1; in kvm_mips_resume_hrtimer()
513 delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz); in kvm_mips_resume_hrtimer()
517 hrtimer_cancel(&vcpu->arch.comparecount_timer); in kvm_mips_resume_hrtimer()
518 hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS); in kvm_mips_resume_hrtimer()
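
The delta expression above counts the ticks until the next Compare match while treating compare == count as a full period rather than an immediate expiry. Two worked cases:

/*
 * count = 0xfffffff0, compare = 0x00000010:
 *   compare - count = 0x20 (mod 2^32); minus 1, cast to u32, plus 1
 *   gives 0x20 = 32 ticks.
 * compare == count:
 *   (u32)(-1) + 1 = 0xffffffff + 1 = 2^32 ticks, one full Count wrap.
 * Either result then scales to nanoseconds by NSEC_PER_SEC / count_hz.
 */
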
522 * kvm_mips_restore_hrtimer() - Restore hrtimer after a gap, updating expiry.
524 * @before: Time before Count was saved, lower bound of drift calculation.
531 * to be used for a period of time, but the exact ktime corresponding to the
552 before_count = vcpu->arch.count_bias + in kvm_mips_restore_hrtimer()
559 * time to jump forwards a little, within reason. If the drift is too in kvm_mips_restore_hrtimer()
562 drift = count - before_count; in kvm_mips_restore_hrtimer()
565 vcpu->arch.count_bias += drift; in kvm_mips_restore_hrtimer()
572 now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now); in kvm_mips_restore_hrtimer()
576 * adjust the bias to avoid guest time going backwards. in kvm_mips_restore_hrtimer()
578 drift = count - now_count; in kvm_mips_restore_hrtimer()
581 vcpu->arch.count_bias += drift; in kvm_mips_restore_hrtimer()
587 delta = (u64)(u32)(now_count - count); in kvm_mips_restore_hrtimer()
588 delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz); in kvm_mips_restore_hrtimer()
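
kvm_mips_restore_hrtimer() compensates for time passing between saving Count and restoring it: if the saved count fell further behind the @before estimate than the caller's min_drift bound allows, or ran ahead of the @now estimate, the difference is folded into count_bias so guest time jumps forward only a little and never runs backwards. A condensed sketch under those assumptions (names follow the excerpt; min_drift is the caller-supplied negative bound):

#include <stdint.h>

static int64_t absorb_drift(uint32_t count, uint32_t before_count,
                            uint32_t now_count, int32_t min_drift,
                            int64_t count_bias)
{
        int32_t drift;

        /* Saved count too far behind what @before implies: absorb it. */
        drift = count - before_count;
        if (drift < min_drift)
                return count_bias + drift;      /* kernel resumes here */

        /* Saved count ahead of what @now implies: absorb it so guest
         * time cannot go backwards. */
        drift = count - now_count;
        if (drift > 0)
                count_bias += drift;
        return count_bias;
}
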
598 * kvm_mips_write_count() - Modify the count and update timer.
606 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_write_count()
611 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); in kvm_mips_write_count()
622 * kvm_mips_init_count() - Initialise timer.
631 vcpu->arch.count_hz = count_hz; in kvm_mips_init_count()
632 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz); in kvm_mips_init_count()
633 vcpu->arch.count_dyn_bias = 0; in kvm_mips_init_count()
640 * kvm_mips_set_count_hz() - Update the frequency of the timer.
647 * Returns: -EINVAL if @count_hz is out of range.
652 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_set_count_hz()
659 return -EINVAL; in kvm_mips_set_count_hz()
661 if (vcpu->arch.count_hz == count_hz) in kvm_mips_set_count_hz()
674 vcpu->arch.count_hz = count_hz; in kvm_mips_set_count_hz()
675 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz); in kvm_mips_set_count_hz()
676 vcpu->arch.count_dyn_bias = 0; in kvm_mips_set_count_hz()
679 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); in kvm_mips_set_count_hz()
688 * kvm_mips_write_compare() - Modify compare and update timer.
699 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_write_compare()
702 s32 delta = compare - old_compare; in kvm_mips_write_compare()
711 kvm_mips_callbacks->dequeue_timer_int(vcpu); in kvm_mips_write_compare()
727 write_c0_gtoffset(compare - read_c0_count()); in kvm_mips_write_compare()
737 kvm_mips_callbacks->dequeue_timer_int(vcpu); in kvm_mips_write_compare()
765 write_c0_gtoffset(compare - read_c0_count()); in kvm_mips_write_compare()
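
Both write_c0_gtoffset() calls rest on the MIPS VZ identity that the guest's CP0_Count reads as the root CP0_Count plus CP0_GTOffset, so writing (compare - read_c0_count()) makes the guest Count read back the new Compare value at that instant. A restatement of just that identity, not the kernel's surrounding logic:

/*
 * Identity behind both calls (MIPS VZ):
 *   guest CP0_Count = root CP0_Count + CP0_GTOffset   (mod 2^32)
 * so  write_c0_gtoffset(compare - read_c0_count())
 * makes guest CP0_Count read back `compare` at this instant.
 */
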
769 * kvm_mips_count_disable() - Disable count.
773 * time will be handled but not after.
778 * Returns: The time that the timer was stopped.
782 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_count_disable()
787 hrtimer_cancel(&vcpu->arch.comparecount_timer); in kvm_mips_count_disable()
798 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
802 * before the final stop time will be handled if the timer isn't disabled by
809 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_count_disable_cause()
812 if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) in kvm_mips_count_disable_cause()
817 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
821 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
829 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_count_enable_cause()
844 * kvm_mips_set_count_ctl() - Update the count control KVM register.
850 * Returns: -EINVAL if reserved bits are set.
855 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_set_count_ctl()
856 s64 changed = count_ctl ^ vcpu->arch.count_ctl; in kvm_mips_set_count_ctl()
863 return -EINVAL; in kvm_mips_set_count_ctl()
866 vcpu->arch.count_ctl = count_ctl; in kvm_mips_set_count_ctl()
873 /* Just record the current time */ in kvm_mips_set_count_ctl()
874 vcpu->arch.count_resume = ktime_get(); in kvm_mips_set_count_ctl()
876 /* disable timer and record current time */ in kvm_mips_set_count_ctl()
877 vcpu->arch.count_resume = kvm_mips_count_disable(vcpu); in kvm_mips_set_count_ctl()
881 * time (wrap 0 to 2^32). in kvm_mips_set_count_ctl()
885 delta = (u64)(u32)(compare - count - 1) + 1; in kvm_mips_set_count_ctl()
887 vcpu->arch.count_hz); in kvm_mips_set_count_ctl()
888 expire = ktime_add_ns(vcpu->arch.count_resume, delta); in kvm_mips_set_count_ctl()
894 kvm_mips_callbacks->queue_timer_int(vcpu); in kvm_mips_set_count_ctl()
906 * kvm_mips_set_count_resume() - Update the count resume KVM register.
912 * Returns: -EINVAL if out of valid range (0..now).
918 * It doesn't make sense for the resume time to be in the future, as it in kvm_mips_set_count_resume()
923 return -EINVAL; in kvm_mips_set_count_resume()
925 vcpu->arch.count_resume = ns_to_ktime(count_resume); in kvm_mips_set_count_resume()
930 * kvm_mips_count_timeout() - Push timer forward on timeout.
939 /* Add the Count period to the current expiry time */ in kvm_mips_count_timeout()
940 hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer, in kvm_mips_count_timeout()
941 vcpu->arch.count_period); in kvm_mips_count_timeout()
947 kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc, in kvm_mips_emul_wait()
948 vcpu->arch.pending_exceptions); in kvm_mips_emul_wait()
950 ++vcpu->stat.wait_exits; in kvm_mips_emul_wait()
952 if (!vcpu->arch.pending_exceptions) { in kvm_mips_emul_wait()
954 vcpu->arch.wait = 1; in kvm_mips_emul_wait()
962 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; in kvm_mips_emul_wait()
975 struct kvm_run *run = vcpu->run; in kvm_mips_emulate_store() local
976 void *data = run->mmio.data; in kvm_mips_emulate_store()
984 curr_pc = vcpu->arch.pc; in kvm_mips_emulate_store()
991 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_store()
992 vcpu->arch.host_cp0_badvaddr); in kvm_mips_emulate_store()
993 if (run->mmio.phys_addr == KVM_INVALID_ADDR) in kvm_mips_emulate_store()
999 run->mmio.len = 8; in kvm_mips_emulate_store()
1000 *(u64 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1003 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1004 vcpu->arch.gprs[rt], *(u64 *)data); in kvm_mips_emulate_store()
1009 run->mmio.len = 4; in kvm_mips_emulate_store()
1010 *(u32 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1013 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1014 vcpu->arch.gprs[rt], *(u32 *)data); in kvm_mips_emulate_store()
1018 run->mmio.len = 2; in kvm_mips_emulate_store()
1019 *(u16 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1022 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1023 vcpu->arch.gprs[rt], *(u16 *)data); in kvm_mips_emulate_store()
1027 run->mmio.len = 1; in kvm_mips_emulate_store()
1028 *(u8 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1031 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1032 vcpu->arch.gprs[rt], *(u8 *)data); in kvm_mips_emulate_store()
1036 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_store()
1037 vcpu->arch.host_cp0_badvaddr) & (~0x3); in kvm_mips_emulate_store()
1038 run->mmio.len = 4; in kvm_mips_emulate_store()
1039 imme = vcpu->arch.host_cp0_badvaddr & 0x3; in kvm_mips_emulate_store()
1043 (vcpu->arch.gprs[rt] >> 24); in kvm_mips_emulate_store()
1047 (vcpu->arch.gprs[rt] >> 16); in kvm_mips_emulate_store()
1051 (vcpu->arch.gprs[rt] >> 8); in kvm_mips_emulate_store()
1054 *(u32 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1061 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1062 vcpu->arch.gprs[rt], *(u32 *)data); in kvm_mips_emulate_store()
1066 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_store()
1067 vcpu->arch.host_cp0_badvaddr) & (~0x3); in kvm_mips_emulate_store()
1068 run->mmio.len = 4; in kvm_mips_emulate_store()
1069 imme = vcpu->arch.host_cp0_badvaddr & 0x3; in kvm_mips_emulate_store()
1072 *(u32 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1076 (vcpu->arch.gprs[rt] << 8); in kvm_mips_emulate_store()
1080 (vcpu->arch.gprs[rt] << 16); in kvm_mips_emulate_store()
1084 (vcpu->arch.gprs[rt] << 24); in kvm_mips_emulate_store()
1091 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1092 vcpu->arch.gprs[rt], *(u32 *)data); in kvm_mips_emulate_store()
1097 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_store()
1098 vcpu->arch.host_cp0_badvaddr) & (~0x7); in kvm_mips_emulate_store()
1100 run->mmio.len = 8; in kvm_mips_emulate_store()
1101 imme = vcpu->arch.host_cp0_badvaddr & 0x7; in kvm_mips_emulate_store()
1105 ((vcpu->arch.gprs[rt] >> 56) & 0xff); in kvm_mips_emulate_store()
1109 ((vcpu->arch.gprs[rt] >> 48) & 0xffff); in kvm_mips_emulate_store()
1113 ((vcpu->arch.gprs[rt] >> 40) & 0xffffff); in kvm_mips_emulate_store()
1117 ((vcpu->arch.gprs[rt] >> 32) & 0xffffffff); in kvm_mips_emulate_store()
1121 ((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff); in kvm_mips_emulate_store()
1125 ((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff); in kvm_mips_emulate_store()
1129 ((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff); in kvm_mips_emulate_store()
1132 *(u64 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1139 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1140 vcpu->arch.gprs[rt], *(u64 *)data); in kvm_mips_emulate_store()
1144 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_store()
1145 vcpu->arch.host_cp0_badvaddr) & (~0x7); in kvm_mips_emulate_store()
1147 run->mmio.len = 8; in kvm_mips_emulate_store()
1148 imme = vcpu->arch.host_cp0_badvaddr & 0x7; in kvm_mips_emulate_store()
1151 *(u64 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1155 (vcpu->arch.gprs[rt] << 8); in kvm_mips_emulate_store()
1159 (vcpu->arch.gprs[rt] << 16); in kvm_mips_emulate_store()
1163 (vcpu->arch.gprs[rt] << 24); in kvm_mips_emulate_store()
1167 (vcpu->arch.gprs[rt] << 32); in kvm_mips_emulate_store()
1171 (vcpu->arch.gprs[rt] << 40); in kvm_mips_emulate_store()
1175 (vcpu->arch.gprs[rt] << 48); in kvm_mips_emulate_store()
1179 (vcpu->arch.gprs[rt] << 56); in kvm_mips_emulate_store()
1186 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1187 vcpu->arch.gprs[rt], *(u64 *)data); in kvm_mips_emulate_store()
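
swl/swr and sdl/sdr store only part of the register into an aligned word or doubleword; the shifts visible above select how many bytes of rt reach the bus, one more byte per step of misalignment in the little-endian layout this code targets, while the masks elided by the search listing preserve the remaining bytes of the data buffer. A sketch of the word-sized (swl) case with invented names:

#include <stdint.h>

/* Bytes of rt that swl stores, little-endian lanes as in the excerpt:
 * misalign (addr & 3) of 0..3 selects rt >> 24, >> 16, >> 8, rt. */
static uint32_t swl_store_bytes(uint32_t rt, unsigned int misalign)
{
        return rt >> (24 - 8 * misalign);
}

/* Merge with the bytes kept from the old word: keep the top
 * (3 - misalign) bytes of `old` (the masks elided in the listing). */
static uint32_t swl_merge(uint32_t old, uint32_t rt, unsigned int misalign)
{
        uint32_t keep = misalign == 3 ? 0
                                      : 0xffffffffu << (8 * (misalign + 1));
        return (old & keep) | swl_store_bytes(rt, misalign);
}
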
1196 * Loongson-3 overridden sdc2 instructions. in kvm_mips_emulate_store()
1204 run->mmio.len = 1; in kvm_mips_emulate_store()
1205 *(u8 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1208 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1209 vcpu->arch.gprs[rt], *(u8 *)data); in kvm_mips_emulate_store()
1212 run->mmio.len = 2; in kvm_mips_emulate_store()
1213 *(u16 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1216 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1217 vcpu->arch.gprs[rt], *(u16 *)data); in kvm_mips_emulate_store()
1220 run->mmio.len = 4; in kvm_mips_emulate_store()
1221 *(u32 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1224 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1225 vcpu->arch.gprs[rt], *(u32 *)data); in kvm_mips_emulate_store()
1228 run->mmio.len = 8; in kvm_mips_emulate_store()
1229 *(u64 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1232 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1233 vcpu->arch.gprs[rt], *(u64 *)data); in kvm_mips_emulate_store()
1236 kvm_err("Godson Extended GS-Store not yet supported (inst=0x%08x)\n", in kvm_mips_emulate_store()
1248 vcpu->mmio_needed = 1; in kvm_mips_emulate_store()
1249 run->mmio.is_write = 1; in kvm_mips_emulate_store()
1250 vcpu->mmio_is_write = 1; in kvm_mips_emulate_store()
1253 run->mmio.phys_addr, run->mmio.len, data); in kvm_mips_emulate_store()
1256 vcpu->mmio_needed = 0; in kvm_mips_emulate_store()
1264 vcpu->arch.pc = curr_pc; in kvm_mips_emulate_store()
1271 struct kvm_run *run = vcpu->run; in kvm_mips_emulate_load() local
1286 curr_pc = vcpu->arch.pc; in kvm_mips_emulate_load()
1290 vcpu->arch.io_pc = vcpu->arch.pc; in kvm_mips_emulate_load()
1291 vcpu->arch.pc = curr_pc; in kvm_mips_emulate_load()
1293 vcpu->arch.io_gpr = rt; in kvm_mips_emulate_load()
1295 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_load()
1296 vcpu->arch.host_cp0_badvaddr); in kvm_mips_emulate_load()
1297 if (run->mmio.phys_addr == KVM_INVALID_ADDR) in kvm_mips_emulate_load()
1300 vcpu->mmio_needed = 2; /* signed */ in kvm_mips_emulate_load()
1304 run->mmio.len = 8; in kvm_mips_emulate_load()
1308 vcpu->mmio_needed = 1; /* unsigned */ in kvm_mips_emulate_load()
1312 run->mmio.len = 4; in kvm_mips_emulate_load()
1316 vcpu->mmio_needed = 1; /* unsigned */ in kvm_mips_emulate_load()
1319 run->mmio.len = 2; in kvm_mips_emulate_load()
1323 vcpu->mmio_needed = 1; /* unsigned */ in kvm_mips_emulate_load()
1326 run->mmio.len = 1; in kvm_mips_emulate_load()
1330 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_load()
1331 vcpu->arch.host_cp0_badvaddr) & (~0x3); in kvm_mips_emulate_load()
1333 run->mmio.len = 4; in kvm_mips_emulate_load()
1334 imme = vcpu->arch.host_cp0_badvaddr & 0x3; in kvm_mips_emulate_load()
1337 vcpu->mmio_needed = 3; /* 1 byte */ in kvm_mips_emulate_load()
1340 vcpu->mmio_needed = 4; /* 2 bytes */ in kvm_mips_emulate_load()
1343 vcpu->mmio_needed = 5; /* 3 bytes */ in kvm_mips_emulate_load()
1346 vcpu->mmio_needed = 6; /* 4 bytes */ in kvm_mips_emulate_load()
1354 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_load()
1355 vcpu->arch.host_cp0_badvaddr) & (~0x3); in kvm_mips_emulate_load()
1357 run->mmio.len = 4; in kvm_mips_emulate_load()
1358 imme = vcpu->arch.host_cp0_badvaddr & 0x3; in kvm_mips_emulate_load()
1361 vcpu->mmio_needed = 7; /* 4 bytes */ in kvm_mips_emulate_load()
1364 vcpu->mmio_needed = 8; /* 3 bytes */ in kvm_mips_emulate_load()
1367 vcpu->mmio_needed = 9; /* 2 bytes */ in kvm_mips_emulate_load()
1370 vcpu->mmio_needed = 10; /* 1 byte */ in kvm_mips_emulate_load()
1379 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_load()
1380 vcpu->arch.host_cp0_badvaddr) & (~0x7); in kvm_mips_emulate_load()
1382 run->mmio.len = 8; in kvm_mips_emulate_load()
1383 imme = vcpu->arch.host_cp0_badvaddr & 0x7; in kvm_mips_emulate_load()
1386 vcpu->mmio_needed = 11; /* 1 byte */ in kvm_mips_emulate_load()
1389 vcpu->mmio_needed = 12; /* 2 bytes */ in kvm_mips_emulate_load()
1392 vcpu->mmio_needed = 13; /* 3 bytes */ in kvm_mips_emulate_load()
1395 vcpu->mmio_needed = 14; /* 4 bytes */ in kvm_mips_emulate_load()
1398 vcpu->mmio_needed = 15; /* 5 bytes */ in kvm_mips_emulate_load()
1401 vcpu->mmio_needed = 16; /* 6 bytes */ in kvm_mips_emulate_load()
1404 vcpu->mmio_needed = 17; /* 7 bytes */ in kvm_mips_emulate_load()
1407 vcpu->mmio_needed = 18; /* 8 bytes */ in kvm_mips_emulate_load()
1415 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_load()
1416 vcpu->arch.host_cp0_badvaddr) & (~0x7); in kvm_mips_emulate_load()
1418 run->mmio.len = 8; in kvm_mips_emulate_load()
1419 imme = vcpu->arch.host_cp0_badvaddr & 0x7; in kvm_mips_emulate_load()
1422 vcpu->mmio_needed = 19; /* 8 bytes */ in kvm_mips_emulate_load()
1425 vcpu->mmio_needed = 20; /* 7 bytes */ in kvm_mips_emulate_load()
1428 vcpu->mmio_needed = 21; /* 6 bytes */ in kvm_mips_emulate_load()
1431 vcpu->mmio_needed = 22; /* 5 bytes */ in kvm_mips_emulate_load()
1434 vcpu->mmio_needed = 23; /* 4 bytes */ in kvm_mips_emulate_load()
1437 vcpu->mmio_needed = 24; /* 3 bytes */ in kvm_mips_emulate_load()
1440 vcpu->mmio_needed = 25; /* 2 bytes */ in kvm_mips_emulate_load()
1443 vcpu->mmio_needed = 26; /* 1 byte */ in kvm_mips_emulate_load()
1456 * Loongson-3 overridden ldc2 instructions. in kvm_mips_emulate_load()
1464 run->mmio.len = 1; in kvm_mips_emulate_load()
1465 vcpu->mmio_needed = 27; /* signed */ in kvm_mips_emulate_load()
1468 run->mmio.len = 2; in kvm_mips_emulate_load()
1469 vcpu->mmio_needed = 28; /* signed */ in kvm_mips_emulate_load()
1472 run->mmio.len = 4; in kvm_mips_emulate_load()
1473 vcpu->mmio_needed = 29; /* signed */ in kvm_mips_emulate_load()
1476 run->mmio.len = 8; in kvm_mips_emulate_load()
1477 vcpu->mmio_needed = 30; /* signed */ in kvm_mips_emulate_load()
1480 kvm_err("Godson Extended GS-Load for float not yet supported (inst=0x%08x)\n", in kvm_mips_emulate_load()
1490 vcpu->mmio_needed = 0; in kvm_mips_emulate_load()
1494 run->mmio.is_write = 0; in kvm_mips_emulate_load()
1495 vcpu->mmio_is_write = 0; in kvm_mips_emulate_load()
1498 run->mmio.phys_addr, run->mmio.len, run->mmio.data); in kvm_mips_emulate_load()
1502 vcpu->mmio_needed = 0; in kvm_mips_emulate_load()
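
Throughout the load emulation above, vcpu->mmio_needed records which completion case applies when the MMIO read finishes. Restated as a sketch with invented symbolic names (the kernel itself uses the bare numbers, as the excerpt's comments show):

enum mmio_load_case {
        MMIO_L_UNSIGNED  = 1,   /* zero-extend: lbu, lhu, lwu      */
        MMIO_L_SIGNED    = 2,   /* sign-extend: lb, lh, lw, ld     */
        MMIO_L_LWL_1     = 3,   /* lwl: 3..6  -> 1..4 bytes        */
        MMIO_L_LWR_4     = 7,   /* lwr: 7..10 -> 4..1 bytes        */
        MMIO_L_LDL_1     = 11,  /* ldl: 11..18 -> 1..8 bytes       */
        MMIO_L_LDR_8     = 19,  /* ldr: 19..26 -> 8..1 bytes       */
        MMIO_L_LS_SIGNED = 27,  /* Loongson ldc2: 27..30, signed   */
};
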
1511 struct kvm_run *run = vcpu->run; in kvm_mips_complete_mmio_load() local
1512 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; in kvm_mips_complete_mmio_load()
1515 if (run->mmio.len > sizeof(*gpr)) { in kvm_mips_complete_mmio_load()
1516 kvm_err("Bad MMIO length: %d", run->mmio.len); in kvm_mips_complete_mmio_load()
1522 vcpu->arch.pc = vcpu->arch.io_pc; in kvm_mips_complete_mmio_load()
1524 switch (run->mmio.len) { in kvm_mips_complete_mmio_load()
1526 switch (vcpu->mmio_needed) { in kvm_mips_complete_mmio_load()
1528 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) | in kvm_mips_complete_mmio_load()
1529 (((*(s64 *)run->mmio.data) & 0xff) << 56); in kvm_mips_complete_mmio_load()
1532 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) | in kvm_mips_complete_mmio_load()
1533 (((*(s64 *)run->mmio.data) & 0xffff) << 48); in kvm_mips_complete_mmio_load()
1536 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) | in kvm_mips_complete_mmio_load()
1537 (((*(s64 *)run->mmio.data) & 0xffffff) << 40); in kvm_mips_complete_mmio_load()
1540 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) | in kvm_mips_complete_mmio_load()
1541 (((*(s64 *)run->mmio.data) & 0xffffffff) << 32); in kvm_mips_complete_mmio_load()
1544 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) | in kvm_mips_complete_mmio_load()
1545 (((*(s64 *)run->mmio.data) & 0xffffffffff) << 24); in kvm_mips_complete_mmio_load()
1548 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) | in kvm_mips_complete_mmio_load()
1549 (((*(s64 *)run->mmio.data) & 0xffffffffffff) << 16); in kvm_mips_complete_mmio_load()
1552 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) | in kvm_mips_complete_mmio_load()
1553 (((*(s64 *)run->mmio.data) & 0xffffffffffffff) << 8); in kvm_mips_complete_mmio_load()
1557 *gpr = *(s64 *)run->mmio.data; in kvm_mips_complete_mmio_load()
1560 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) | in kvm_mips_complete_mmio_load()
1561 ((((*(s64 *)run->mmio.data)) >> 8) & 0xffffffffffffff); in kvm_mips_complete_mmio_load()
1564 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) | in kvm_mips_complete_mmio_load()
1565 ((((*(s64 *)run->mmio.data)) >> 16) & 0xffffffffffff); in kvm_mips_complete_mmio_load()
1568 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) | in kvm_mips_complete_mmio_load()
1569 ((((*(s64 *)run->mmio.data)) >> 24) & 0xffffffffff); in kvm_mips_complete_mmio_load()
1572 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) | in kvm_mips_complete_mmio_load()
1573 ((((*(s64 *)run->mmio.data)) >> 32) & 0xffffffff); in kvm_mips_complete_mmio_load()
1576 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) | in kvm_mips_complete_mmio_load()
1577 ((((*(s64 *)run->mmio.data)) >> 40) & 0xffffff); in kvm_mips_complete_mmio_load()
1580 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) | in kvm_mips_complete_mmio_load()
1581 ((((*(s64 *)run->mmio.data)) >> 48) & 0xffff); in kvm_mips_complete_mmio_load()
1584 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) | in kvm_mips_complete_mmio_load()
1585 ((((*(s64 *)run->mmio.data)) >> 56) & 0xff); in kvm_mips_complete_mmio_load()
1588 *gpr = *(s64 *)run->mmio.data; in kvm_mips_complete_mmio_load()
1593 switch (vcpu->mmio_needed) { in kvm_mips_complete_mmio_load()
1595 *gpr = *(u32 *)run->mmio.data; in kvm_mips_complete_mmio_load()
1598 *gpr = *(s32 *)run->mmio.data; in kvm_mips_complete_mmio_load()
1601 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) | in kvm_mips_complete_mmio_load()
1602 (((*(s32 *)run->mmio.data) & 0xff) << 24); in kvm_mips_complete_mmio_load()
1605 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) | in kvm_mips_complete_mmio_load()
1606 (((*(s32 *)run->mmio.data) & 0xffff) << 16); in kvm_mips_complete_mmio_load()
1609 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) | in kvm_mips_complete_mmio_load()
1610 (((*(s32 *)run->mmio.data) & 0xffffff) << 8); in kvm_mips_complete_mmio_load()
1614 *gpr = *(s32 *)run->mmio.data; in kvm_mips_complete_mmio_load()
1617 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) | in kvm_mips_complete_mmio_load()
1618 ((((*(s32 *)run->mmio.data)) >> 8) & 0xffffff); in kvm_mips_complete_mmio_load()
1621 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) | in kvm_mips_complete_mmio_load()
1622 ((((*(s32 *)run->mmio.data)) >> 16) & 0xffff); in kvm_mips_complete_mmio_load()
1625 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) | in kvm_mips_complete_mmio_load()
1626 ((((*(s32 *)run->mmio.data)) >> 24) & 0xff); in kvm_mips_complete_mmio_load()
1629 *gpr = *(s32 *)run->mmio.data; in kvm_mips_complete_mmio_load()
1634 if (vcpu->mmio_needed == 1) in kvm_mips_complete_mmio_load()
1635 *gpr = *(u16 *)run->mmio.data; in kvm_mips_complete_mmio_load()
1637 *gpr = *(s16 *)run->mmio.data; in kvm_mips_complete_mmio_load()
1641 if (vcpu->mmio_needed == 1) in kvm_mips_complete_mmio_load()
1642 *gpr = *(u8 *)run->mmio.data; in kvm_mips_complete_mmio_load()
1644 *gpr = *(s8 *)run->mmio.data; in kvm_mips_complete_mmio_load()
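
The merges above recombine the MMIO data with the untouched bytes of the destination GPR according to mmio_needed. One worked case, the 3-byte lwl merge (mmio_needed == 5) from the 4-byte switch:

/*
 * old gpr = 0x11223344, loaded word = 0xaabbccdd:
 *   (old & 0xff) | ((data & 0xffffff) << 8)
 *     = 0x44 | 0xbbccdd00 = 0xbbccdd44.
 * On a 64-bit kernel the 32-bit intermediate sign-extends to
 * 0xffffffffbbccdd44, matching lwl's architected sign extension.
 */
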