Lines Matching +full:data +full:- +full:shift

1 // SPDX-License-Identifier: GPL-2.0
89 #define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1))
110 ((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
113 ((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
116 (SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
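
The three macro fragments above are the whole register-decode story: RV_X pulls an n-bit field out of an instruction word, and the SHIFT_RIGHT/REG_MASK expression turns the 5-bit register number found at bit position pos directly into a byte offset into the saved register file. A minimal user-space sketch of the same arithmetic; LOG_REGBYTES = 3 (8-byte registers, rv64) and the example opcode are my own illustration, not taken from the listing:

#include <stdio.h>

#define RV_X(x, s, n)           (((x) >> (s)) & ((1 << (n)) - 1))
#define LOG_REGBYTES            3       /* 8-byte registers on rv64 */
#define REG_MASK                ((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
#define SHIFT_RIGHT(x, y)       ((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
#define REG_OFFSET(insn, pos)   (SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)

int main(void)
{
        unsigned long insn = 0x00c58533;        /* add a0, a1, a2 */

        /* rd sits in bits 11:7, rs1 in 19:15, rs2 in 24:20. */
        printf("rd  = x%lu\n", RV_X(insn, 7, 5));       /* x10 (a0) */
        printf("rs1 = x%lu\n", RV_X(insn, 15, 5));      /* x11 (a1) */
        printf("rs2 = x%lu\n", RV_X(insn, 20, 5));      /* x12 (a2) */

        /* The same rd field, scaled to a byte offset into an array of
         * 8-byte registers: 10 * 8 = 80. */
        printf("rd offset = %lu bytes\n", REG_OFFSET(insn, 7));
        return 0;
}

For add a0, a1, a2 the rd field is 10, so the computed offset is 80 bytes, i.e. register slot 10 in an array of 8-byte registers.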
140 * 2) Returns 0 for exit to user-space
157 utrap.sepc = vcpu->arch.guest_context.sepc; in truly_illegal_insn()
173 utrap.sepc = vcpu->arch.guest_context.sepc; in truly_virtual_insn()
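
truly_illegal_insn() and truly_virtual_insn() are the fall-through cases: they record the faulting sepc in a trap description so the exception can be redirected back into the guest, and the comment fragment above ("Returns 0 for exit to user-space") spells out the return contract these emulation helpers share. A stand-alone sketch of that three-way convention; the stub and its decision inputs are invented purely to make the contract concrete:

#include <stdio.h>

/* Toy handler with the same three-way contract the comments describe. */
static int emulate_step(int can_handle_in_kernel, int is_valid)
{
        if (!is_valid)
                return -1;      /* failure: abort the run loop    */
        if (!can_handle_in_kernel)
                return 0;       /* hand the exit to user space    */
        return 1;               /* emulated here: keep running    */
}

int main(void)
{
        int ret = emulate_step(1, 1);

        if (ret < 0)
                printf("report failure and exit the run loop\n");
        else if (ret == 0)
                printf("exit to user space for emulation\n");
        else
                printf("continue the run loop\n");
        return 0;
}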
184 * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
199 vcpu->stat.wfi_exit_stat++; in wfi_insn()
219 * kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space
220 * emulation or in-kernel emulation
223 * @run: The VCPU run struct containing the CSR data
231 if (vcpu->arch.csr_decode.return_handled) in kvm_riscv_vcpu_csr_return()
233 vcpu->arch.csr_decode.return_handled = 1; in kvm_riscv_vcpu_csr_return()
236 insn = vcpu->arch.csr_decode.insn; in kvm_riscv_vcpu_csr_return()
238 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_csr_return()
239 run->riscv_csr.ret_value); in kvm_riscv_vcpu_csr_return()
242 vcpu->arch.guest_context.sepc += INSN_LEN(insn); in kvm_riscv_vcpu_csr_return()
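
Once ret_value is available, whether it came from in-kernel emulation or from user space, kvm_riscv_vcpu_csr_return() writes it into the destination register and advances sepc by INSN_LEN(insn), so the guest resumes after the trapping CSR instruction instead of re-executing it. INSN_LEN follows the standard RISC-V encoding rule: if both low bits of the instruction are 1 it is a full 32-bit encoding, otherwise a 16-bit compressed one. A small sketch of that length computation; the macro bodies are re-derived from the encoding rule rather than copied from the file:

#include <stdio.h>

/* Standard encoding rule: both low bits set => 32-bit, else compressed. */
#define INSN_IS_16BIT(insn)     (((insn) & 0x3) != 0x3)
#define INSN_LEN(insn)          (INSN_IS_16BIT(insn) ? 2 : 4)

int main(void)
{
        unsigned long sepc = 0x80200000;
        unsigned int full_insn = 0x10569073;    /* a full-size SYSTEM encoding   */
        unsigned int c_insn = 0x962a;           /* a compressed encoding (c.add) */

        printf("after the 32-bit insn: sepc = 0x%lx\n", sepc + INSN_LEN(full_insn));
        printf("after the 16-bit insn: sepc = 0x%lx\n", sepc + INSN_LEN(c_insn));
        return 0;
}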
252 ulong rs1_val = GET_RS1(insn, &vcpu->arch.guest_context); in csr_insn()
259 wr_mask = -1UL; in csr_insn()
264 new_val = -1UL; in csr_insn()
271 wr_mask = -1UL; in csr_insn()
276 new_val = -1UL; in csr_insn()
287 vcpu->arch.csr_decode.insn = insn; in csr_insn()
288 vcpu->arch.csr_decode.return_handled = 0; in csr_insn()
291 run->riscv_csr.csr_num = csr_num; in csr_insn()
292 run->riscv_csr.new_value = new_val; in csr_insn()
293 run->riscv_csr.write_mask = wr_mask; in csr_insn()
294 run->riscv_csr.ret_value = 0; in csr_insn()
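
Before any emulation is attempted, csr_insn() reduces every CSR instruction variant to the same (csr_num, new_value, write_mask) triple stored in run->riscv_csr: CSRRW replaces the whole register (write mask of all ones), CSRRS sets the bits named by rs1 (new value of all ones, mask = rs1), and CSRRC clears them (new value of zero, mask = rs1), with the immediate forms substituting the zero-extended immediate for rs1. A sketch of how such a pair is applied to a CSR; the helper name and the sample values are illustrative:

#include <stdio.h>

/* Apply a CSR access expressed as (new_val, wr_mask): only the bits
 * selected by wr_mask may change. */
static unsigned long csr_apply(unsigned long old, unsigned long new_val,
                               unsigned long wr_mask)
{
        return (old & ~wr_mask) | (new_val & wr_mask);
}

int main(void)
{
        unsigned long csr = 0xff00;     /* current CSR contents */
        unsigned long rs1 = 0x0ff0;     /* value of rs1 / uimm  */

        printf("csrrw -> 0x%lx\n", csr_apply(csr, rs1, -1UL));  /* 0x0ff0 */
        printf("csrrs -> 0x%lx\n", csr_apply(csr, -1UL, rs1));  /* 0xfff0 */
        printf("csrrc -> 0x%lx\n", csr_apply(csr, 0, rs1));     /* 0xf000 */
        return 0;
}

Whoever performs the emulation returns the old CSR value as the instruction's result (ret_value) and writes back csr_apply(old, new_val, wr_mask).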
296 /* Find in-kernel CSR function */ in csr_insn()
299 if ((tcfn->base <= csr_num) && in csr_insn()
300 (csr_num < (tcfn->base + tcfn->count))) { in csr_insn()
306 /* First try in-kernel CSR emulation */ in csr_insn()
307 if (cfn && cfn->func) { in csr_insn()
308 rc = cfn->func(vcpu, csr_num, &val, new_val, wr_mask); in csr_insn()
311 run->riscv_csr.ret_value = val; in csr_insn()
312 vcpu->stat.csr_exit_kernel++; in csr_insn()
320 /* Exit to user-space for CSR emulation */ in csr_insn()
322 vcpu->stat.csr_exit_user++; in csr_insn()
323 run->exit_reason = KVM_EXIT_RISCV_CSR; in csr_insn()
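
The lookup just above is a range check: each csr_func entry claims the CSR numbers in [base, base + count) and supplies an in-kernel handler; when no entry matches, or the handler defers, the exit_reason becomes KVM_EXIT_RISCV_CSR and user space finishes the job. A toy version of that dispatch; the table shape mirrors what the listing implies, but the entry, CSR numbers and handler are made up:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical range-based dispatch table entry. */
struct csr_func {
        unsigned int base;
        unsigned int count;
        int (*func)(unsigned int csr_num, unsigned long *val);
};

static int emulate_time(unsigned int csr_num, unsigned long *val)
{
        (void)csr_num;
        *val = 0x1234;                  /* pretend timer value */
        return 0;
}

static const struct csr_func csr_funcs[] = {
        { .base = 0xc01, .count = 1, .func = emulate_time },   /* time */
};

static int csr_lookup(unsigned int csr_num, unsigned long *val)
{
        size_t i;

        for (i = 0; i < sizeof(csr_funcs) / sizeof(csr_funcs[0]); i++) {
                const struct csr_func *tcfn = &csr_funcs[i];

                if (tcfn->base <= csr_num &&
                    csr_num < tcfn->base + tcfn->count)
                        return tcfn->func(csr_num, val);
        }
        return -1;                      /* not handled in kernel */
}

int main(void)
{
        unsigned long val = 0;

        if (csr_lookup(0xc01, &val) == 0)
                printf("in-kernel emulation: 0x%lx\n", val);
        if (csr_lookup(0x105, &val) != 0)
                printf("csr 0x105: exit to user space for emulation\n");
        return 0;
}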
375 if ((insn & ifn->mask) == ifn->match) { in system_opcode_insn()
376 rc = ifn->func(vcpu, run, insn); in system_opcode_insn()
387 vcpu->arch.guest_context.sepc += INSN_LEN(insn); in system_opcode_insn()
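
system_opcode_insn() walks a mask/match table: an entry applies when (insn & mask) == match, and once the instruction has been dealt with sepc is advanced by INSN_LEN(insn), just as in the CSR return path. A toy decoder in the same style; the two encodings used are standard RISC-V (WFI and CSRRS), but the table itself is illustrative:

#include <stddef.h>
#include <stdio.h>

/* An entry applies when (insn & mask) == match. */
struct insn_func {
        unsigned int mask;
        unsigned int match;
        const char *name;
};

static const struct insn_func opcode_funcs[] = {
        { .mask = 0xffffffff, .match = 0x10500073, .name = "wfi" },
        { .mask = 0x0000707f, .match = 0x00002073, .name = "csrrs" },
};

int main(void)
{
        unsigned int insn = 0x10500073;         /* wfi */
        size_t i;

        for (i = 0; i < sizeof(opcode_funcs) / sizeof(opcode_funcs[0]); i++) {
                const struct insn_func *ifn = &opcode_funcs[i];

                if ((insn & ifn->mask) == ifn->match) {
                        printf("decoded: %s\n", ifn->name);
                        break;
                }
        }
        return 0;
}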
397 * kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
400 * @run: The VCPU run struct containing the mmio data
403 * Returns > 0 to continue run-loop
404 * Returns 0 to exit run-loop and handle in user-space.
405 * Returns < 0 to report failure and exit run-loop
410 unsigned long insn = trap->stval; in kvm_riscv_vcpu_virtual_insn()
416 ct = &vcpu->arch.guest_context; in kvm_riscv_vcpu_virtual_insn()
418 ct->sepc, in kvm_riscv_vcpu_virtual_insn()
421 utrap.sepc = ct->sepc; in kvm_riscv_vcpu_virtual_insn()
439 * kvm_riscv_vcpu_mmio_load -- Emulate MMIO load instruction
442 * @run: The VCPU run struct containing the mmio data
446 * Returns > 0 to continue run-loop
447 * Returns 0 to exit run-loop and handle in user-space.
448 * Returns < 0 to report failure and exit run-loop
456 int shift = 0, len = 0, insn_len = 0; in kvm_riscv_vcpu_mmio_load() local
458 struct kvm_cpu_context *ct = &vcpu->arch.guest_context; in kvm_riscv_vcpu_mmio_load()
473 insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc, in kvm_riscv_vcpu_mmio_load()
477 utrap.sepc = ct->sepc; in kvm_riscv_vcpu_mmio_load()
484 /* Decode length of MMIO and shift */ in kvm_riscv_vcpu_mmio_load()
487 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
490 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
493 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
497 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
503 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
509 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
514 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
518 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
523 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
525 return -EOPNOTSUPP; in kvm_riscv_vcpu_mmio_load()
529 if (fault_addr & (len - 1)) in kvm_riscv_vcpu_mmio_load()
530 return -EIO; in kvm_riscv_vcpu_mmio_load()
533 vcpu->arch.mmio_decode.insn = insn; in kvm_riscv_vcpu_mmio_load()
534 vcpu->arch.mmio_decode.insn_len = insn_len; in kvm_riscv_vcpu_mmio_load()
535 vcpu->arch.mmio_decode.shift = shift; in kvm_riscv_vcpu_mmio_load()
536 vcpu->arch.mmio_decode.len = len; in kvm_riscv_vcpu_mmio_load()
537 vcpu->arch.mmio_decode.return_handled = 0; in kvm_riscv_vcpu_mmio_load()
540 run->mmio.is_write = false; in kvm_riscv_vcpu_mmio_load()
541 run->mmio.phys_addr = fault_addr; in kvm_riscv_vcpu_mmio_load()
542 run->mmio.len = len; in kvm_riscv_vcpu_mmio_load()
547 memcpy(run->mmio.data, data_buf, len); in kvm_riscv_vcpu_mmio_load()
548 vcpu->stat.mmio_exit_kernel++; in kvm_riscv_vcpu_mmio_load()
554 vcpu->stat.mmio_exit_user++; in kvm_riscv_vcpu_mmio_load()
555 run->exit_reason = KVM_EXIT_MMIO; in kvm_riscv_vcpu_mmio_load()
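
The load path boils the instruction down to two numbers: len, the access width in bytes, and shift, which for the sign-extending loads is 8 * (sizeof(ulong) - len) and is consumed later in kvm_riscv_vcpu_mmio_return(); the fault address is then rejected with -EIO unless it is naturally aligned, which is what the fault_addr & (len - 1) test checks. A worked example of both computations with made-up values:

#include <stdio.h>

int main(void)
{
        unsigned long fault_addr = 0x10001002;  /* example MMIO address */
        int len = 4;                            /* e.g. a 32-bit load   */
        int shift = 8 * (int)(sizeof(unsigned long) - len);

        /* On a 64-bit host a 4-byte load leaves 32 bits to strip later. */
        printf("len = %d, shift = %d\n", len, shift);

        /* Natural-alignment check: any bit inside (len - 1) set means the
         * address is not a multiple of the access size. */
        if (fault_addr & (len - 1))
                printf("0x%lx is misaligned for a %d-byte access\n",
                       fault_addr, len);
        else
                printf("0x%lx is aligned\n", fault_addr);
        return 0;
}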
561 * kvm_riscv_vcpu_mmio_store -- Emulate MMIO store instruction
564 * @run: The VCPU run struct containing the mmio data
568 * Returns > 0 to continue run-loop
569 * Returns 0 to exit run-loop and handle in user-space.
570 * Returns < 0 to report failure and exit run-loop
580 ulong data; in kvm_riscv_vcpu_mmio_store() local
584 struct kvm_cpu_context *ct = &vcpu->arch.guest_context; in kvm_riscv_vcpu_mmio_store()
599 insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc, in kvm_riscv_vcpu_mmio_store()
603 utrap.sepc = ct->sepc; in kvm_riscv_vcpu_mmio_store()
610 data = GET_RS2(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
611 data8 = data16 = data32 = data64 = data; in kvm_riscv_vcpu_mmio_store()
626 data64 = GET_RS2S(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
630 data64 = GET_RS2C(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
634 data32 = GET_RS2S(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
638 data32 = GET_RS2C(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
640 return -EOPNOTSUPP; in kvm_riscv_vcpu_mmio_store()
644 if (fault_addr & (len - 1)) in kvm_riscv_vcpu_mmio_store()
645 return -EIO; in kvm_riscv_vcpu_mmio_store()
648 vcpu->arch.mmio_decode.insn = insn; in kvm_riscv_vcpu_mmio_store()
649 vcpu->arch.mmio_decode.insn_len = insn_len; in kvm_riscv_vcpu_mmio_store()
650 vcpu->arch.mmio_decode.shift = 0; in kvm_riscv_vcpu_mmio_store()
651 vcpu->arch.mmio_decode.len = len; in kvm_riscv_vcpu_mmio_store()
652 vcpu->arch.mmio_decode.return_handled = 0; in kvm_riscv_vcpu_mmio_store()
654 /* Copy data to kvm_run instance */ in kvm_riscv_vcpu_mmio_store()
657 *((u8 *)run->mmio.data) = data8; in kvm_riscv_vcpu_mmio_store()
660 *((u16 *)run->mmio.data) = data16; in kvm_riscv_vcpu_mmio_store()
663 *((u32 *)run->mmio.data) = data32; in kvm_riscv_vcpu_mmio_store()
666 *((u64 *)run->mmio.data) = data64; in kvm_riscv_vcpu_mmio_store()
669 return -EOPNOTSUPP; in kvm_riscv_vcpu_mmio_store()
673 run->mmio.is_write = true; in kvm_riscv_vcpu_mmio_store()
674 run->mmio.phys_addr = fault_addr; in kvm_riscv_vcpu_mmio_store()
675 run->mmio.len = len; in kvm_riscv_vcpu_mmio_store()
679 fault_addr, len, run->mmio.data)) { in kvm_riscv_vcpu_mmio_store()
681 vcpu->stat.mmio_exit_kernel++; in kvm_riscv_vcpu_mmio_store()
687 vcpu->stat.mmio_exit_user++; in kvm_riscv_vcpu_mmio_store()
688 run->exit_reason = KVM_EXIT_MMIO; in kvm_riscv_vcpu_mmio_store()
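
For stores the direction is reversed: the rs2 value is narrowed to data8/data16/data32/data64 and only the low len bytes land in run->mmio.data through a cast of the matching width, in host byte order. A user-space sketch of that narrowing; the buffer, value and width are invented:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char mmio_data[8] = { 0 };
        unsigned long data = 0x55667788UL;      /* value taken from rs2 */
        uint8_t  data8  = (uint8_t)data;
        uint16_t data16 = (uint16_t)data;
        uint32_t data32 = (uint32_t)data;
        uint64_t data64 = data;
        int len = 2;                            /* e.g. a half-word (sh) store */

        /* Place exactly 'len' bytes in the buffer, in host byte order,
         * which is what the sized casts into run->mmio.data achieve. */
        switch (len) {
        case 1: memcpy(mmio_data, &data8,  sizeof(data8));  break;
        case 2: memcpy(mmio_data, &data16, sizeof(data16)); break;
        case 4: memcpy(mmio_data, &data32, sizeof(data32)); break;
        case 8: memcpy(mmio_data, &data64, sizeof(data64)); break;
        }

        printf("first two buffer bytes: %02x %02x (low half-word 0x%04x)\n",
               mmio_data[0], mmio_data[1], data16);
        return 0;
}

On a little-endian host this prints 88 77, i.e. the low half-word of the original value.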
694 * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
695 * or in-kernel IO emulation
698 * @run: The VCPU run struct containing the mmio data
707 int len, shift; in kvm_riscv_vcpu_mmio_return() local
709 if (vcpu->arch.mmio_decode.return_handled) in kvm_riscv_vcpu_mmio_return()
712 vcpu->arch.mmio_decode.return_handled = 1; in kvm_riscv_vcpu_mmio_return()
713 insn = vcpu->arch.mmio_decode.insn; in kvm_riscv_vcpu_mmio_return()
715 if (run->mmio.is_write) in kvm_riscv_vcpu_mmio_return()
718 len = vcpu->arch.mmio_decode.len; in kvm_riscv_vcpu_mmio_return()
719 shift = vcpu->arch.mmio_decode.shift; in kvm_riscv_vcpu_mmio_return()
723 data8 = *((u8 *)run->mmio.data); in kvm_riscv_vcpu_mmio_return()
724 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_mmio_return()
725 (ulong)data8 << shift >> shift); in kvm_riscv_vcpu_mmio_return()
728 data16 = *((u16 *)run->mmio.data); in kvm_riscv_vcpu_mmio_return()
729 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_mmio_return()
730 (ulong)data16 << shift >> shift); in kvm_riscv_vcpu_mmio_return()
733 data32 = *((u32 *)run->mmio.data); in kvm_riscv_vcpu_mmio_return()
734 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_mmio_return()
735 (ulong)data32 << shift >> shift); in kvm_riscv_vcpu_mmio_return()
738 data64 = *((u64 *)run->mmio.data); in kvm_riscv_vcpu_mmio_return()
739 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_mmio_return()
740 (ulong)data64 << shift >> shift); in kvm_riscv_vcpu_mmio_return()
743 return -EOPNOTSUPP; in kvm_riscv_vcpu_mmio_return()
748 vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len; in kvm_riscv_vcpu_mmio_return()
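
kvm_riscv_vcpu_mmio_return() finishes a load by widening the bytes in run->mmio.data back to register width with the << shift >> shift idiom; whether that zero- or sign-extends depends on the signedness of the shifted operand, since unsigned types get a logical right shift and signed types an arithmetic one on the usual ABIs. A worked illustration of both cases, assuming a 64-bit unsigned long:

#include <stdio.h>

int main(void)
{
        unsigned char byte = 0xf0;      /* one byte read back from MMIO */
        int len = 1;
        int shift = 8 * (int)(sizeof(unsigned long) - len);    /* 56 on 64-bit */

        /* Unsigned operand: logical right shift, so the byte is zero-extended. */
        unsigned long zext = (unsigned long)byte << shift >> shift;

        /* Signed operand: arithmetic right shift on the usual ABIs, so the
         * top bit of the byte is propagated (sign extension). */
        long sext = (long)((unsigned long)byte << shift) >> shift;

        printf("zero-extended: 0x%lx\n", zext);                 /* 0xf0 */
        printf("sign-extended: 0x%lx\n", (unsigned long)sext);  /* 0xfffffffffffffff0 */
        return 0;
}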