Lines matching full:arch (each entry shows the source line number, the matched text, and the enclosing function)
14 * This file is derived from arch/powerpc/kvm/book3s.c,
240 cpu = READ_ONCE(vcpu->arch.thread_cpu); in kvmppc_fast_vcpu_kick_hv()
277 * Updates to busy_stolen are protected by arch.tbacct_lock;
317 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_load_hv()
322 if (vcpu->arch.busy_preempt != TB_NIL) { in kvmppc_core_vcpu_load_hv()
323 WARN_ON_ONCE(vcpu->arch.state != KVMPPC_VCPU_BUSY_IN_HOST); in kvmppc_core_vcpu_load_hv()
324 vc->stolen_tb += mftb() - vcpu->arch.busy_preempt; in kvmppc_core_vcpu_load_hv()
325 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_load_hv()
341 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
342 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST && in kvmppc_core_vcpu_load_hv()
343 vcpu->arch.busy_preempt != TB_NIL) { in kvmppc_core_vcpu_load_hv()
344 vcpu->arch.busy_stolen += now - vcpu->arch.busy_preempt; in kvmppc_core_vcpu_load_hv()
345 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_load_hv()
347 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
352 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_put_hv()
361 WARN_ON_ONCE(vcpu->arch.state == KVMPPC_VCPU_RUNNABLE); in kvmppc_core_vcpu_put_hv()
367 vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) in kvmppc_core_vcpu_put_hv()
368 vcpu->arch.busy_preempt = mftb(); in kvmppc_core_vcpu_put_hv()
377 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
378 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) in kvmppc_core_vcpu_put_hv()
379 vcpu->arch.busy_preempt = now; in kvmppc_core_vcpu_put_hv()
380 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
385 vcpu->arch.pvr = pvr; in kvmppc_set_pvr_hv()
394 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_arch_compat()
455 vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap); in kvmppc_dump_regs()
461 vcpu->arch.regs.ctr, vcpu->arch.regs.link); in kvmppc_dump_regs()
463 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); in kvmppc_dump_regs()
465 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); in kvmppc_dump_regs()
467 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); in kvmppc_dump_regs()
469 vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); in kvmppc_dump_regs()
470 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); in kvmppc_dump_regs()
472 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_dump_regs()
473 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max); in kvmppc_dump_regs()
474 for (r = 0; r < vcpu->arch.slb_max; ++r) in kvmppc_dump_regs()
476 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); in kvmppc_dump_regs()
478 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, in kvmppc_dump_regs()
479 vcpu->arch.last_inst); in kvmppc_dump_regs()
499 spin_lock(&vcpu->arch.vpa_update_lock); in set_vpa()
505 spin_unlock(&vcpu->arch.vpa_update_lock); in set_vpa()
568 spin_lock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
581 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
592 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
595 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
602 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
605 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
612 if (vpa_is_registered(&tvcpu->arch.dtl) || in do_h_register_vpa()
613 vpa_is_registered(&tvcpu->arch.slb_shadow)) in do_h_register_vpa()
616 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
621 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
626 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
637 spin_unlock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
659 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
664 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
694 if (!(vcpu->arch.vpa.update_pending || in kvmppc_update_vpas()
695 vcpu->arch.slb_shadow.update_pending || in kvmppc_update_vpas()
696 vcpu->arch.dtl.update_pending)) in kvmppc_update_vpas()
699 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
700 if (vcpu->arch.vpa.update_pending) { in kvmppc_update_vpas()
701 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa); in kvmppc_update_vpas()
702 if (vcpu->arch.vpa.pinned_addr) in kvmppc_update_vpas()
703 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); in kvmppc_update_vpas()
705 if (vcpu->arch.dtl.update_pending) { in kvmppc_update_vpas()
706 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl); in kvmppc_update_vpas()
707 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; in kvmppc_update_vpas()
708 vcpu->arch.dtl_index = 0; in kvmppc_update_vpas()
710 if (vcpu->arch.slb_shadow.update_pending) in kvmppc_update_vpas()
711 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow); in kvmppc_update_vpas()
712 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
742 dt = vcpu->arch.dtl_ptr; in __kvmppc_create_dtl_entry()
749 dt->processor_id = cpu_to_be16(pcpu + vcpu->arch.ptid); in __kvmppc_create_dtl_entry()
756 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); in __kvmppc_create_dtl_entry()
759 if (dt == vcpu->arch.dtl.pinned_end) in __kvmppc_create_dtl_entry()
760 dt = vcpu->arch.dtl.pinned_addr; in __kvmppc_create_dtl_entry()
761 vcpu->arch.dtl_ptr = dt; in __kvmppc_create_dtl_entry()
764 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); in __kvmppc_create_dtl_entry()
766 /* vcpu->arch.dtl.dirty is set by the caller */ in __kvmppc_create_dtl_entry()
778 vpa = vcpu->arch.vpa.pinned_addr; in kvmppc_update_vpa_dispatch()
785 stolen = core_stolen - vcpu->arch.stolen_logged; in kvmppc_update_vpa_dispatch()
786 vcpu->arch.stolen_logged = core_stolen; in kvmppc_update_vpa_dispatch()
787 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_update_vpa_dispatch()
788 stolen += vcpu->arch.busy_stolen; in kvmppc_update_vpa_dispatch()
789 vcpu->arch.busy_stolen = 0; in kvmppc_update_vpa_dispatch()
790 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_update_vpa_dispatch()
796 vcpu->arch.vpa.dirty = true; in kvmppc_update_vpa_dispatch()
807 vpa = vcpu->arch.vpa.pinned_addr; in kvmppc_update_vpa_dispatch_p9()
812 stolen_delta = stolen - vcpu->arch.stolen_logged; in kvmppc_update_vpa_dispatch_p9()
813 vcpu->arch.stolen_logged = stolen; in kvmppc_update_vpa_dispatch_p9()
819 vcpu->arch.vpa.dirty = true; in kvmppc_update_vpa_dispatch_p9()
828 if (vcpu->arch.doorbell_request) in kvmppc_doorbell_pending()
838 vc = vcpu->arch.vcore; in kvmppc_doorbell_pending()
845 if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207) in kvmppc_power8_compatible()
847 if ((!vcpu->arch.vcore->arch_compat) && in kvmppc_power8_compatible()
868 vcpu->arch.ciabr = value1; in kvmppc_h_set_mode()
879 vcpu->arch.dawr0 = value1; in kvmppc_h_set_mode()
880 vcpu->arch.dawrx0 = value2; in kvmppc_h_set_mode()
889 if (!vcpu->kvm->arch.dawr1_enabled) in kvmppc_h_set_mode()
895 vcpu->arch.dawr1 = value1; in kvmppc_h_set_mode()
896 vcpu->arch.dawrx1 = value2; in kvmppc_h_set_mode()
988 struct kvmppc_vcore *vcore = target->arch.vcore; in kvm_arch_vcpu_yield_to()
1003 if (target->arch.state == KVMPPC_VCPU_RUNNABLE && in kvm_arch_vcpu_yield_to()
1018 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
1019 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; in kvmppc_get_yield_count()
1022 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
1047 do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid, in kvmppc_nested_h_rpt_invalidate()
1083 do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid, in kvmppc_h_rpt_invalidate()
1098 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls)) in kvmppc_pseries_do_hcall()
1157 tvcpu->arch.prodded = 1; in kvmppc_pseries_do_hcall()
1159 if (tvcpu->arch.ceded) in kvmppc_pseries_do_hcall()
1182 if (list_empty(&kvm->arch.rtas_tokens)) in kvmppc_pseries_do_hcall()
1268 if (!arch_get_random_seed_longs(&vcpu->arch.regs.gpr[4], 1)) in kvmppc_pseries_do_hcall()
1292 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1296 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1346 * Instead the kvm->arch.secure_guest flag is checked inside in kvmppc_pseries_do_hcall()
1357 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1370 vcpu->arch.shregs.msr |= MSR_EE; in kvmppc_cede()
1371 vcpu->arch.ceded = 1; in kvmppc_cede()
1373 if (vcpu->arch.prodded) { in kvmppc_cede()
1374 vcpu->arch.prodded = 0; in kvmppc_cede()
1376 vcpu->arch.ceded = 0; in kvmppc_cede()
1428 vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu); in kvmppc_emulate_debug_inst()
1446 nthreads = vcpu->kvm->arch.emul_smt_mode; in kvmppc_read_dpdes()
1485 thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1); in kvmppc_emulate_doorbell_instr()
1492 if (arg >= kvm->arch.emul_smt_mode) in kvmppc_emulate_doorbell_instr()
1497 if (!tvcpu->arch.doorbell_request) { in kvmppc_emulate_doorbell_instr()
1498 tvcpu->arch.doorbell_request = 1; in kvmppc_emulate_doorbell_instr()
1506 vcpu->arch.vcore->dpdes = 0; in kvmppc_emulate_doorbell_instr()
1507 vcpu->arch.doorbell_request = 0; in kvmppc_emulate_doorbell_instr()
1538 if (!(vcpu->arch.hfscr_permitted & HFSCR_PM)) in kvmppc_pmu_unavailable()
1541 vcpu->arch.hfscr |= HFSCR_PM; in kvmppc_pmu_unavailable()
1548 if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB)) in kvmppc_ebb_unavailable()
1551 vcpu->arch.hfscr |= HFSCR_EBB; in kvmppc_ebb_unavailable()
1558 if (!(vcpu->arch.hfscr_permitted & HFSCR_TM)) in kvmppc_tm_unavailable()
1561 vcpu->arch.hfscr |= HFSCR_TM; in kvmppc_tm_unavailable()
1582 if (vcpu->arch.shregs.msr & MSR_HV) { in kvmppc_handle_exit_hv()
1585 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
1586 vcpu->arch.shregs.msr); in kvmppc_handle_exit_hv()
1589 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1594 switch (vcpu->arch.trap) { in kvmppc_handle_exit_hv()
1598 vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER; in kvmppc_handle_exit_hv()
1624 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); in kvmppc_handle_exit_hv()
1632 if (!vcpu->kvm->arch.fwnmi_enabled) { in kvmppc_handle_exit_hv()
1633 ulong flags = vcpu->arch.shregs.msr & 0x083c0000; in kvmppc_handle_exit_hv()
1641 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1645 if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED) in kvmppc_handle_exit_hv()
1662 flags = vcpu->arch.shregs.msr & 0x1f0000ull; in kvmppc_handle_exit_hv()
1671 if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) { in kvmppc_handle_exit_hv()
1706 vcpu->arch.hcall_needed = 1; in kvmppc_handle_exit_hv()
1725 unlikely(vcpu->arch.fault_dsisr == HDSISR_CANARY)) { in kvmppc_handle_exit_hv()
1741 if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) { in kvmppc_handle_exit_hv()
1743 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_handle_exit_hv()
1748 if (!(vcpu->arch.shregs.msr & MSR_DR)) in kvmppc_handle_exit_hv()
1749 vsid = vcpu->kvm->arch.vrma_slb_v; in kvmppc_handle_exit_hv()
1751 vsid = vcpu->arch.fault_gpa; in kvmppc_handle_exit_hv()
1753 err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, in kvmppc_handle_exit_hv()
1754 vsid, vcpu->arch.fault_dsisr, true); in kvmppc_handle_exit_hv()
1761 vcpu->arch.fault_dar, err); in kvmppc_handle_exit_hv()
1770 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_exit_hv()
1771 vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr & in kvmppc_handle_exit_hv()
1780 if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) in kvmppc_handle_exit_hv()
1781 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; in kvmppc_handle_exit_hv()
1786 if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) { in kvmppc_handle_exit_hv()
1788 vcpu->arch.fault_dsisr); in kvmppc_handle_exit_hv()
1793 if (!(vcpu->arch.shregs.msr & MSR_IR)) in kvmppc_handle_exit_hv()
1794 vsid = vcpu->kvm->arch.vrma_slb_v; in kvmppc_handle_exit_hv()
1796 vsid = vcpu->arch.fault_gpa; in kvmppc_handle_exit_hv()
1798 err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, in kvmppc_handle_exit_hv()
1799 vsid, vcpu->arch.fault_dsisr, false); in kvmppc_handle_exit_hv()
1819 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED) in kvmppc_handle_exit_hv()
1820 vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ? in kvmppc_handle_exit_hv()
1821 swab32(vcpu->arch.emul_inst) : in kvmppc_handle_exit_hv()
1822 vcpu->arch.emul_inst; in kvmppc_handle_exit_hv()
1853 u64 cause = vcpu->arch.hfscr >> 56; in kvmppc_handle_exit_hv()
1879 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
1880 vcpu->arch.shregs.msr); in kvmppc_handle_exit_hv()
1881 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1904 if (vcpu->arch.shregs.msr & MSR_HV) { in kvmppc_handle_nested_exit()
1907 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_nested_exit()
1908 vcpu->arch.shregs.msr); in kvmppc_handle_nested_exit()
1912 switch (vcpu->arch.trap) { in kvmppc_handle_nested_exit()
1929 vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER; in kvmppc_handle_nested_exit()
1947 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); in kvmppc_handle_nested_exit()
1962 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_nested_exit()
1963 vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) & in kvmppc_handle_nested_exit()
1965 if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) in kvmppc_handle_nested_exit()
1966 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; in kvmppc_handle_nested_exit()
1987 u64 cause = vcpu->arch.hfscr >> 56; in kvmppc_handle_nested_exit()
1995 if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) || in kvmppc_handle_nested_exit()
1996 (vcpu->arch.nested_hfscr & (1UL << cause))) { in kvmppc_handle_nested_exit()
1997 vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST; in kvmppc_handle_nested_exit()
2004 &vcpu->arch.emul_inst); in kvmppc_handle_nested_exit()
2017 vcpu->arch.trap = 0; in kvmppc_handle_nested_exit()
2053 sregs->pvr = vcpu->arch.pvr; in kvm_arch_vcpu_ioctl_get_sregs_hv()
2054 for (i = 0; i < vcpu->arch.slb_max; i++) { in kvm_arch_vcpu_ioctl_get_sregs_hv()
2055 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; in kvm_arch_vcpu_ioctl_get_sregs_hv()
2056 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; in kvm_arch_vcpu_ioctl_get_sregs_hv()
2068 if (sregs->pvr != vcpu->arch.pvr) in kvm_arch_vcpu_ioctl_set_sregs_hv()
2072 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvm_arch_vcpu_ioctl_set_sregs_hv()
2074 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; in kvm_arch_vcpu_ioctl_set_sregs_hv()
2075 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; in kvm_arch_vcpu_ioctl_set_sregs_hv()
2079 vcpu->arch.slb_max = j; in kvm_arch_vcpu_ioctl_set_sregs_hv()
2131 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_lpcr()
2161 if (vcpu->arch.vcore != vc) in kvmppc_set_lpcr()
2164 vcpu->arch.intr_msr |= MSR_LE; in kvmppc_set_lpcr()
2166 vcpu->arch.intr_msr &= ~MSR_LE; in kvmppc_set_lpcr()
2189 *val = get_reg_val(id, vcpu->arch.dabr); in kvmppc_get_one_reg_hv()
2192 *val = get_reg_val(id, vcpu->arch.dabrx); in kvmppc_get_one_reg_hv()
2195 *val = get_reg_val(id, vcpu->arch.dscr); in kvmppc_get_one_reg_hv()
2198 *val = get_reg_val(id, vcpu->arch.purr); in kvmppc_get_one_reg_hv()
2201 *val = get_reg_val(id, vcpu->arch.spurr); in kvmppc_get_one_reg_hv()
2204 *val = get_reg_val(id, vcpu->arch.amr); in kvmppc_get_one_reg_hv()
2207 *val = get_reg_val(id, vcpu->arch.uamor); in kvmppc_get_one_reg_hv()
2211 *val = get_reg_val(id, vcpu->arch.mmcr[i]); in kvmppc_get_one_reg_hv()
2214 *val = get_reg_val(id, vcpu->arch.mmcr[2]); in kvmppc_get_one_reg_hv()
2217 *val = get_reg_val(id, vcpu->arch.mmcra); in kvmppc_get_one_reg_hv()
2220 *val = get_reg_val(id, vcpu->arch.mmcrs); in kvmppc_get_one_reg_hv()
2223 *val = get_reg_val(id, vcpu->arch.mmcr[3]); in kvmppc_get_one_reg_hv()
2227 *val = get_reg_val(id, vcpu->arch.pmc[i]); in kvmppc_get_one_reg_hv()
2231 *val = get_reg_val(id, vcpu->arch.spmc[i]); in kvmppc_get_one_reg_hv()
2234 *val = get_reg_val(id, vcpu->arch.siar); in kvmppc_get_one_reg_hv()
2237 *val = get_reg_val(id, vcpu->arch.sdar); in kvmppc_get_one_reg_hv()
2240 *val = get_reg_val(id, vcpu->arch.sier[0]); in kvmppc_get_one_reg_hv()
2243 *val = get_reg_val(id, vcpu->arch.sier[1]); in kvmppc_get_one_reg_hv()
2246 *val = get_reg_val(id, vcpu->arch.sier[2]); in kvmppc_get_one_reg_hv()
2249 *val = get_reg_val(id, vcpu->arch.iamr); in kvmppc_get_one_reg_hv()
2252 *val = get_reg_val(id, vcpu->arch.pspb); in kvmppc_get_one_reg_hv()
2262 *val = get_reg_val(id, vcpu->arch.doorbell_request); in kvmppc_get_one_reg_hv()
2264 *val = get_reg_val(id, vcpu->arch.vcore->dpdes); in kvmppc_get_one_reg_hv()
2267 *val = get_reg_val(id, vcpu->arch.vcore->vtb); in kvmppc_get_one_reg_hv()
2270 *val = get_reg_val(id, vcpu->arch.dawr0); in kvmppc_get_one_reg_hv()
2273 *val = get_reg_val(id, vcpu->arch.dawrx0); in kvmppc_get_one_reg_hv()
2276 *val = get_reg_val(id, vcpu->arch.dawr1); in kvmppc_get_one_reg_hv()
2279 *val = get_reg_val(id, vcpu->arch.dawrx1); in kvmppc_get_one_reg_hv()
2282 *val = get_reg_val(id, vcpu->arch.ciabr); in kvmppc_get_one_reg_hv()
2285 *val = get_reg_val(id, vcpu->arch.csigr); in kvmppc_get_one_reg_hv()
2288 *val = get_reg_val(id, vcpu->arch.tacr); in kvmppc_get_one_reg_hv()
2291 *val = get_reg_val(id, vcpu->arch.tcscr); in kvmppc_get_one_reg_hv()
2294 *val = get_reg_val(id, vcpu->arch.pid); in kvmppc_get_one_reg_hv()
2297 *val = get_reg_val(id, vcpu->arch.acop); in kvmppc_get_one_reg_hv()
2300 *val = get_reg_val(id, vcpu->arch.wort); in kvmppc_get_one_reg_hv()
2303 *val = get_reg_val(id, vcpu->arch.tid); in kvmppc_get_one_reg_hv()
2306 *val = get_reg_val(id, vcpu->arch.psscr); in kvmppc_get_one_reg_hv()
2309 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2310 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); in kvmppc_get_one_reg_hv()
2311 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2314 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2315 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; in kvmppc_get_one_reg_hv()
2316 val->vpaval.length = vcpu->arch.slb_shadow.len; in kvmppc_get_one_reg_hv()
2317 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2320 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2321 val->vpaval.addr = vcpu->arch.dtl.next_gpa; in kvmppc_get_one_reg_hv()
2322 val->vpaval.length = vcpu->arch.dtl.len; in kvmppc_get_one_reg_hv()
2323 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2326 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); in kvmppc_get_one_reg_hv()
2330 *val = get_reg_val(id, vcpu->arch.vcore->lpcr); in kvmppc_get_one_reg_hv()
2333 *val = get_reg_val(id, vcpu->arch.ppr); in kvmppc_get_one_reg_hv()
2337 *val = get_reg_val(id, vcpu->arch.tfhar); in kvmppc_get_one_reg_hv()
2340 *val = get_reg_val(id, vcpu->arch.tfiar); in kvmppc_get_one_reg_hv()
2343 *val = get_reg_val(id, vcpu->arch.texasr); in kvmppc_get_one_reg_hv()
2347 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]); in kvmppc_get_one_reg_hv()
2355 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; in kvmppc_get_one_reg_hv()
2358 val->vval = vcpu->arch.vr_tm.vr[i-32]; in kvmppc_get_one_reg_hv()
2365 *val = get_reg_val(id, vcpu->arch.cr_tm); in kvmppc_get_one_reg_hv()
2368 *val = get_reg_val(id, vcpu->arch.xer_tm); in kvmppc_get_one_reg_hv()
2371 *val = get_reg_val(id, vcpu->arch.lr_tm); in kvmppc_get_one_reg_hv()
2374 *val = get_reg_val(id, vcpu->arch.ctr_tm); in kvmppc_get_one_reg_hv()
2377 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); in kvmppc_get_one_reg_hv()
2380 *val = get_reg_val(id, vcpu->arch.amr_tm); in kvmppc_get_one_reg_hv()
2383 *val = get_reg_val(id, vcpu->arch.ppr_tm); in kvmppc_get_one_reg_hv()
2386 *val = get_reg_val(id, vcpu->arch.vrsave_tm); in kvmppc_get_one_reg_hv()
2390 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); in kvmppc_get_one_reg_hv()
2395 *val = get_reg_val(id, vcpu->arch.dscr_tm); in kvmppc_get_one_reg_hv()
2398 *val = get_reg_val(id, vcpu->arch.tar_tm); in kvmppc_get_one_reg_hv()
2402 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); in kvmppc_get_one_reg_hv()
2405 *val = get_reg_val(id, vcpu->arch.dec_expires); in kvmppc_get_one_reg_hv()
2408 *val = get_reg_val(id, vcpu->arch.online); in kvmppc_get_one_reg_hv()
2411 *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr); in kvmppc_get_one_reg_hv()
2435 vcpu->arch.dabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2438 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; in kvmppc_set_one_reg_hv()
2441 vcpu->arch.dscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2444 vcpu->arch.purr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2447 vcpu->arch.spurr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2450 vcpu->arch.amr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2453 vcpu->arch.uamor = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2457 vcpu->arch.mmcr[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2460 vcpu->arch.mmcr[2] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2463 vcpu->arch.mmcra = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2466 vcpu->arch.mmcrs = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2469 *val = get_reg_val(id, vcpu->arch.mmcr[3]); in kvmppc_set_one_reg_hv()
2473 vcpu->arch.pmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2477 vcpu->arch.spmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2480 vcpu->arch.siar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2483 vcpu->arch.sdar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2486 vcpu->arch.sier[0] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2489 vcpu->arch.sier[1] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2492 vcpu->arch.sier[2] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2495 vcpu->arch.iamr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2498 vcpu->arch.pspb = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2502 vcpu->arch.doorbell_request = set_reg_val(id, *val) & 1; in kvmppc_set_one_reg_hv()
2504 vcpu->arch.vcore->dpdes = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2507 vcpu->arch.vcore->vtb = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2510 vcpu->arch.dawr0 = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2513 vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP; in kvmppc_set_one_reg_hv()
2516 vcpu->arch.dawr1 = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2519 vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP; in kvmppc_set_one_reg_hv()
2522 vcpu->arch.ciabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2524 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) in kvmppc_set_one_reg_hv()
2525 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ in kvmppc_set_one_reg_hv()
2528 vcpu->arch.csigr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2531 vcpu->arch.tacr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2534 vcpu->arch.tcscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2537 vcpu->arch.pid = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2540 vcpu->arch.acop = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2543 vcpu->arch.wort = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2546 vcpu->arch.tid = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2549 vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS; in kvmppc_set_one_reg_hv()
2554 if (!addr && (vcpu->arch.slb_shadow.next_gpa || in kvmppc_set_one_reg_hv()
2555 vcpu->arch.dtl.next_gpa)) in kvmppc_set_one_reg_hv()
2557 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); in kvmppc_set_one_reg_hv()
2563 if (addr && !vcpu->arch.vpa.next_gpa) in kvmppc_set_one_reg_hv()
2565 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); in kvmppc_set_one_reg_hv()
2572 !vcpu->arch.vpa.next_gpa)) in kvmppc_set_one_reg_hv()
2575 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); in kvmppc_set_one_reg_hv()
2590 if (!vcpu->arch.dec_expires && tb_offset) in kvmppc_set_one_reg_hv()
2591 vcpu->arch.dec_expires = get_tb() + tb_offset; in kvmppc_set_one_reg_hv()
2593 vcpu->arch.vcore->tb_offset = tb_offset; in kvmppc_set_one_reg_hv()
2603 vcpu->arch.ppr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2607 vcpu->arch.tfhar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2610 vcpu->arch.tfiar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2613 vcpu->arch.texasr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2617 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2625 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; in kvmppc_set_one_reg_hv()
2628 vcpu->arch.vr_tm.vr[i-32] = val->vval; in kvmppc_set_one_reg_hv()
2634 vcpu->arch.cr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2637 vcpu->arch.xer_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2640 vcpu->arch.lr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2643 vcpu->arch.ctr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2646 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2649 vcpu->arch.amr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2652 vcpu->arch.ppr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2655 vcpu->arch.vrsave_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2659 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2664 vcpu->arch.dscr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2667 vcpu->arch.tar_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2674 vcpu->arch.dec_expires = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2678 if (i && !vcpu->arch.online) in kvmppc_set_one_reg_hv()
2679 atomic_inc(&vcpu->arch.vcore->online_count); in kvmppc_set_one_reg_hv()
2680 else if (!i && vcpu->arch.online) in kvmppc_set_one_reg_hv()
2681 atomic_dec(&vcpu->arch.vcore->online_count); in kvmppc_set_one_reg_hv()
2682 vcpu->arch.online = i; in kvmppc_set_one_reg_hv()
2685 vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2722 vcore->lpcr = kvm->arch.lpcr; in kvmppc_vcore_create()
2736 {"vcpu_entry", offsetof(struct kvm_vcpu, arch.vcpu_entry)},
2737 {"guest_entry", offsetof(struct kvm_vcpu, arch.guest_entry)},
2738 {"in_guest", offsetof(struct kvm_vcpu, arch.in_guest)},
2739 {"guest_exit", offsetof(struct kvm_vcpu, arch.guest_exit)},
2740 {"vcpu_exit", offsetof(struct kvm_vcpu, arch.vcpu_exit)},
2741 {"hypercall", offsetof(struct kvm_vcpu, arch.hcall)},
2742 {"page_fault", offsetof(struct kvm_vcpu, arch.pg_fault)},
2744 {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)},
2745 {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)},
2746 {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)},
2747 {"guest", offsetof(struct kvm_vcpu, arch.guest_time)},
2748 {"cede", offsetof(struct kvm_vcpu, arch.cede_time)},
2892 vcpu->arch.shared = &vcpu->arch.shregs; in kvmppc_core_vcpu_create_hv()
2899 vcpu->arch.shared_big_endian = true; in kvmppc_core_vcpu_create_hv()
2901 vcpu->arch.shared_big_endian = false; in kvmppc_core_vcpu_create_hv()
2904 vcpu->arch.mmcr[0] = MMCR0_FC; in kvmppc_core_vcpu_create_hv()
2906 vcpu->arch.mmcr[0] |= MMCR0_PMCCEXT; in kvmppc_core_vcpu_create_hv()
2907 vcpu->arch.mmcra = MMCRA_BHRB_DISABLE; in kvmppc_core_vcpu_create_hv()
2910 vcpu->arch.ctrl = CTRL_RUNLATCH; in kvmppc_core_vcpu_create_hv()
2913 spin_lock_init(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_create_hv()
2914 spin_lock_init(&vcpu->arch.tbacct_lock); in kvmppc_core_vcpu_create_hv()
2915 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_create_hv()
2916 vcpu->arch.shregs.msr = MSR_ME; in kvmppc_core_vcpu_create_hv()
2917 vcpu->arch.intr_msr = MSR_SF | MSR_ME; in kvmppc_core_vcpu_create_hv()
2926 vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB | in kvmppc_core_vcpu_create_hv()
2929 vcpu->arch.hfscr &= mfspr(SPRN_HFSCR); in kvmppc_core_vcpu_create_hv()
2932 vcpu->arch.hfscr |= HFSCR_TM; in kvmppc_core_vcpu_create_hv()
2936 vcpu->arch.hfscr |= HFSCR_TM; in kvmppc_core_vcpu_create_hv()
2938 vcpu->arch.hfscr_permitted = vcpu->arch.hfscr; in kvmppc_core_vcpu_create_hv()
2943 vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM); in kvmppc_core_vcpu_create_hv()
2947 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_core_vcpu_create_hv()
2949 init_waitqueue_head(&vcpu->arch.cpu_run); in kvmppc_core_vcpu_create_hv()
2955 if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) { in kvmppc_core_vcpu_create_hv()
2959 BUG_ON(kvm->arch.smt_mode != 1); in kvmppc_core_vcpu_create_hv()
2963 core = id / kvm->arch.smt_mode; in kvmppc_core_vcpu_create_hv()
2966 vcore = kvm->arch.vcores[core]; in kvmppc_core_vcpu_create_hv()
2977 id & ~(kvm->arch.smt_mode - 1)); in kvmppc_core_vcpu_create_hv()
2978 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmppc_core_vcpu_create_hv()
2979 kvm->arch.vcores[core] = vcore; in kvmppc_core_vcpu_create_hv()
2980 kvm->arch.online_vcores++; in kvmppc_core_vcpu_create_hv()
2981 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmppc_core_vcpu_create_hv()
2992 vcpu->arch.vcore = vcore; in kvmppc_core_vcpu_create_hv()
2993 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; in kvmppc_core_vcpu_create_hv()
2994 vcpu->arch.thread_cpu = -1; in kvmppc_core_vcpu_create_hv()
2995 vcpu->arch.prev_cpu = -1; in kvmppc_core_vcpu_create_hv()
2997 vcpu->arch.cpu_type = KVM_CPU_3S_64; in kvmppc_core_vcpu_create_hv()
3030 if (!kvm->arch.online_vcores) { in kvmhv_set_smt_mode()
3031 kvm->arch.smt_mode = smt_mode; in kvmhv_set_smt_mode()
3032 kvm->arch.emul_smt_mode = esmt; in kvmhv_set_smt_mode()
3049 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
3050 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); in kvmppc_core_vcpu_free_hv()
3051 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); in kvmppc_core_vcpu_free_hv()
3052 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); in kvmppc_core_vcpu_free_hv()
3053 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
3074 hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL); in kvmppc_set_timer()
3075 vcpu->arch.timer_running = 1; in kvmppc_set_timer()
3085 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_remove_runnable()
3087 spin_lock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
3089 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - in kvmppc_remove_runnable()
3090 vcpu->arch.stolen_logged; in kvmppc_remove_runnable()
3091 vcpu->arch.busy_preempt = now; in kvmppc_remove_runnable()
3092 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_remove_runnable()
3093 spin_unlock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
3095 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL); in kvmppc_remove_runnable()
3147 struct kvm_nested_guest *nested = vcpu->arch.nested; in radix_flush_cpu()
3154 need_tlb_flush = &kvm->arch.need_tlb_flush; in radix_flush_cpu()
3192 if (kvm->arch.lpcr & LPCR_GTSE) in do_migrate_away_vcpu()
3200 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmppc_prepare_radix_vcpu()
3208 prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id]; in kvmppc_prepare_radix_vcpu()
3210 prev_cpu = vcpu->arch.prev_cpu; in kvmppc_prepare_radix_vcpu()
3233 nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu; in kvmppc_prepare_radix_vcpu()
3235 vcpu->arch.prev_cpu = pcpu; in kvmppc_prepare_radix_vcpu()
3246 if (vcpu->arch.timer_running) { in kvmppc_start_thread()
3247 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_start_thread()
3248 vcpu->arch.timer_running = 0; in kvmppc_start_thread()
3250 cpu += vcpu->arch.ptid; in kvmppc_start_thread()
3252 vcpu->arch.thread_cpu = cpu; in kvmppc_start_thread()
3492 if (signal_pending(vcpu->arch.run_task)) in prepare_threads()
3493 vcpu->arch.ret = -EINTR; in prepare_threads()
3494 else if (vcpu->arch.vpa.update_pending || in prepare_threads()
3495 vcpu->arch.slb_shadow.update_pending || in prepare_threads()
3496 vcpu->arch.dtl.update_pending) in prepare_threads()
3497 vcpu->arch.ret = RESUME_GUEST; in prepare_threads()
3501 wake_up(&vcpu->arch.cpu_run); in prepare_threads()
3515 if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) { in collect_piggybacks()
3544 if (!vc->kvm->arch.mmu_ready) in recheck_signals_and_mmu()
3547 if (signal_pending(vcpu->arch.run_task)) in recheck_signals_and_mmu()
3567 * so any vcpus becoming runnable will have their arch.trap in post_guest_process()
3579 if (vcpu->arch.trap) in post_guest_process()
3581 vcpu->arch.run_task); in post_guest_process()
3583 vcpu->arch.ret = ret; in post_guest_process()
3584 vcpu->arch.trap = 0; in post_guest_process()
3587 if (is_kvmppc_resume_guest(vcpu->arch.ret)) { in post_guest_process()
3588 if (vcpu->arch.pending_exceptions) in post_guest_process()
3590 if (vcpu->arch.ceded) in post_guest_process()
3596 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
3612 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
3710 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_core()
3734 vcpu->arch.ret = -EBUSY; in kvmppc_run_core()
3736 wake_up(&vcpu->arch.cpu_run); in kvmppc_run_core()
3868 * It updates vcpu->cpu and vcpu->arch.thread_cpu in kvmppc_run_core()
3877 if (!vcpu->arch.ptid) in kvmppc_run_core()
3879 active |= 1 << (thr + vcpu->arch.ptid); in kvmppc_run_core()
4010 struct lppaca *lp = vcpu->arch.vpa.pinned_addr; in vcpu_vpa_increment_dispatch()
4014 vcpu->arch.vpa.dirty = 1; in vcpu_vpa_increment_dispatch()
4021 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmhv_vcpu_entry_p9_nested()
4048 if (vcpu->arch.psscr != host_psscr) in kvmhv_vcpu_entry_p9_nested()
4049 mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr); in kvmhv_vcpu_entry_p9_nested()
4054 vcpu->arch.regs.msr = vcpu->arch.shregs.msr; in kvmhv_vcpu_entry_p9_nested()
4056 if (vcpu->arch.nested) { in kvmhv_vcpu_entry_p9_nested()
4057 hvregs.lpid = vcpu->arch.nested->shadow_lpid; in kvmhv_vcpu_entry_p9_nested()
4058 hvregs.vcpu_token = vcpu->arch.nested_vcpu_id; in kvmhv_vcpu_entry_p9_nested()
4060 hvregs.lpid = vcpu->kvm->arch.lpid; in kvmhv_vcpu_entry_p9_nested()
4081 mtspr(SPRN_DAR, vcpu->arch.shregs.dar); in kvmhv_vcpu_entry_p9_nested()
4082 mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr); in kvmhv_vcpu_entry_p9_nested()
4084 accumulate_time(vcpu, &vcpu->arch.in_guest); in kvmhv_vcpu_entry_p9_nested()
4086 __pa(&vcpu->arch.regs)); in kvmhv_vcpu_entry_p9_nested()
4087 accumulate_time(vcpu, &vcpu->arch.guest_exit); in kvmhv_vcpu_entry_p9_nested()
4090 vcpu->arch.shregs.msr = vcpu->arch.regs.msr; in kvmhv_vcpu_entry_p9_nested()
4091 vcpu->arch.shregs.dar = mfspr(SPRN_DAR); in kvmhv_vcpu_entry_p9_nested()
4092 vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR); in kvmhv_vcpu_entry_p9_nested()
4093 vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR); in kvmhv_vcpu_entry_p9_nested()
4101 vcpu->arch.dec_expires = dec + (*tb + vc->tb_offset); in kvmhv_vcpu_entry_p9_nested()
4106 if (vcpu->arch.psscr != host_psscr) in kvmhv_vcpu_entry_p9_nested()
4119 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmhv_p9_guest_entry()
4131 vcpu->arch.ceded = 0; in kvmhv_p9_guest_entry()
4159 !(vcpu->arch.shregs.msr & MSR_PR)) { in kvmhv_p9_guest_entry()
4175 vcpu->arch.ceded = 0; in kvmhv_p9_guest_entry()
4207 vcpu->arch.slb_max = 0; in kvmhv_p9_guest_entry()
4224 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); in kvmppc_wait_for_exec()
4225 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_wait_for_exec()
4230 finish_wait(&vcpu->arch.cpu_run, &wait); in kvmppc_wait_for_exec()
4256 return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr < in xive_interrupt_pending()
4257 vcpu->arch.xive_saved_state.cppr; in xive_interrupt_pending()
4268 if (vcpu->arch.pending_exceptions || vcpu->arch.prodded || in kvmppc_vcpu_woken()
4277 if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu)) in kvmppc_vcpu_check_block()
4422 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_setup_mmu()
4423 if (!kvm->arch.mmu_ready) { in kvmhv_setup_mmu()
4429 kvm->arch.mmu_ready = 1; in kvmhv_setup_mmu()
4432 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_setup_mmu()
4446 vcpu->arch.ret = RESUME_GUEST; in kvmppc_run_vcpu()
4447 vcpu->arch.trap = 0; in kvmppc_run_vcpu()
4453 vc = vcpu->arch.vcore; in kvmppc_run_vcpu()
4455 vcpu->arch.ceded = 0; in kvmppc_run_vcpu()
4456 vcpu->arch.run_task = current; in kvmppc_run_vcpu()
4457 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); in kvmppc_run_vcpu()
4458 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmppc_run_vcpu()
4459 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_run_vcpu()
4460 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu); in kvmppc_run_vcpu()
4481 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
4484 if (!vcpu->kvm->arch.mmu_ready) { in kvmppc_run_vcpu()
4492 vcpu->arch.ret = r; in kvmppc_run_vcpu()
4506 if (signal_pending(v->arch.run_task)) { in kvmppc_run_vcpu()
4510 v->arch.ret = -EINTR; in kvmppc_run_vcpu()
4511 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
4514 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_vcpu()
4519 n_ceded += v->arch.ceded; in kvmppc_run_vcpu()
4521 v->arch.ceded = 0; in kvmppc_run_vcpu()
4538 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
4547 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_run_vcpu()
4551 vcpu->arch.ret = -EINTR; in kvmppc_run_vcpu()
4558 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
4563 return vcpu->arch.ret; in kvmppc_run_vcpu()
4575 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmhv_run_single_vcpu()
4582 vcpu->arch.ret = RESUME_GUEST; in kvmhv_run_single_vcpu()
4583 vcpu->arch.trap = 0; in kvmhv_run_single_vcpu()
4585 vc = vcpu->arch.vcore; in kvmhv_run_single_vcpu()
4586 vcpu->arch.ceded = 0; in kvmhv_run_single_vcpu()
4587 vcpu->arch.run_task = current; in kvmhv_run_single_vcpu()
4588 vcpu->arch.last_inst = KVM_INST_FETCH_FAILED; in kvmhv_run_single_vcpu()
4591 if (unlikely(!kvm->arch.mmu_ready)) { in kvmhv_run_single_vcpu()
4596 vcpu->arch.ret = r; in kvmhv_run_single_vcpu()
4614 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmhv_run_single_vcpu()
4618 if (need_resched() || !kvm->arch.mmu_ready) in kvmhv_run_single_vcpu()
4622 vcpu->arch.thread_cpu = pcpu; in kvmhv_run_single_vcpu()
4638 if (vcpu->arch.shregs.msr & MSR_EE) { in kvmhv_run_single_vcpu()
4643 &vcpu->arch.pending_exceptions)) { in kvmhv_run_single_vcpu()
4646 } else if (vcpu->arch.pending_exceptions || in kvmhv_run_single_vcpu()
4647 vcpu->arch.doorbell_request || in kvmhv_run_single_vcpu()
4649 vcpu->arch.ret = RESUME_HOST; in kvmhv_run_single_vcpu()
4653 if (vcpu->arch.timer_running) { in kvmhv_run_single_vcpu()
4654 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmhv_run_single_vcpu()
4655 vcpu->arch.timer_running = 0; in kvmhv_run_single_vcpu()
4672 vcpu->arch.trap = trap; in kvmhv_run_single_vcpu()
4682 vcpu->arch.thread_cpu = -1; in kvmhv_run_single_vcpu()
4683 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmhv_run_single_vcpu()
4722 vcpu->arch.ret = r; in kvmhv_run_single_vcpu()
4733 vcpu->arch.ret = -EINTR; in kvmhv_run_single_vcpu()
4746 vcpu->arch.ceded = 0; in kvmhv_run_single_vcpu()
4751 return vcpu->arch.ret; in kvmhv_run_single_vcpu()
4756 vcpu->arch.ret = -EINTR; in kvmhv_run_single_vcpu()
4759 vcpu->arch.thread_cpu = -1; in kvmhv_run_single_vcpu()
4760 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmhv_run_single_vcpu()
4774 start_timing(vcpu, &vcpu->arch.vcpu_entry); in kvmppc_vcpu_run_hv()
4776 if (!vcpu->arch.sane) { in kvmppc_vcpu_run_hv()
4806 if (!vcpu->arch.online) { in kvmppc_vcpu_run_hv()
4807 atomic_inc(&vcpu->arch.vcore->online_count); in kvmppc_vcpu_run_hv()
4808 vcpu->arch.online = 1; in kvmppc_vcpu_run_hv()
4814 atomic_inc(&kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
4827 (vcpu->arch.hfscr & HFSCR_TM)) in kvmppc_vcpu_run_hv()
4836 vcpu->arch.waitp = &vcpu->arch.vcore->wait; in kvmppc_vcpu_run_hv()
4837 vcpu->arch.pgdir = kvm->mm->pgd; in kvmppc_vcpu_run_hv()
4838 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_vcpu_run_hv()
4841 accumulate_time(vcpu, &vcpu->arch.guest_entry); in kvmppc_vcpu_run_hv()
4844 vcpu->arch.vcore->lpcr); in kvmppc_vcpu_run_hv()
4849 accumulate_time(vcpu, &vcpu->arch.hcall); in kvmppc_vcpu_run_hv()
4851 if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) { in kvmppc_vcpu_run_hv()
4866 accumulate_time(vcpu, &vcpu->arch.pg_fault); in kvmppc_vcpu_run_hv()
4869 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_vcpu_run_hv()
4878 accumulate_time(vcpu, &vcpu->arch.vcpu_exit); in kvmppc_vcpu_run_hv()
4880 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_vcpu_run_hv()
4881 atomic_dec(&kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
4993 spin_lock(&vcpu->arch.vpa_update_lock); in kvm_vm_ioctl_get_dirty_log_hv()
4994 kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf); in kvm_vm_ioctl_get_dirty_log_hv()
4995 kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf); in kvm_vm_ioctl_get_dirty_log_hv()
4996 spin_unlock(&vcpu->arch.vpa_update_lock); in kvm_vm_ioctl_get_dirty_log_hv()
5011 vfree(slot->arch.rmap); in kvmppc_core_free_memslot_hv()
5012 slot->arch.rmap = NULL; in kvmppc_core_free_memslot_hv()
5021 unsigned long size = array_size(new->npages, sizeof(*new->arch.rmap)); in kvmppc_core_prepare_memory_region_hv()
5026 new->arch.rmap = vzalloc(size); in kvmppc_core_prepare_memory_region_hv()
5027 if (!new->arch.rmap) in kvmppc_core_prepare_memory_region_hv()
5030 new->arch.rmap = old->arch.rmap; in kvmppc_core_prepare_memory_region_hv()
5048 atomic64_inc(&kvm->arch.mmio_update); in kvmppc_core_commit_memory_region_hv()
5069 if (!kvm->arch.secure_guest) in kvmppc_core_commit_memory_region_hv()
5090 * Update LPCR values in kvm->arch and in vcores.
5091 * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion
5092 * of kvm->arch.lpcr update).
5099 if ((kvm->arch.lpcr & mask) == lpcr) in kvmppc_update_lpcr()
5102 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
5105 struct kvmppc_vcore *vc = kvm->arch.vcores[i]; in kvmppc_update_lpcr()
5113 if (++cores_done >= kvm->arch.online_vcores) in kvmppc_update_lpcr()
5124 dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) | in kvmppc_setup_partition_table()
5125 ((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1); in kvmppc_setup_partition_table()
5127 dw0 |= kvm->arch.sdr1; in kvmppc_setup_partition_table()
5130 dw1 = kvm->arch.process_table; in kvmppc_setup_partition_table()
5133 __pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE; in kvmppc_setup_partition_table()
5134 dw1 = PATB_GR | kvm->arch.process_table; in kvmppc_setup_partition_table()
5136 kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1); in kvmppc_setup_partition_table()
5141 * Must be called with kvm->arch.mmu_setup_lock held.
5155 if (!kvm->arch.hpt.virt) { in kvmppc_hv_setup_htab_rma()
5204 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | in kvmppc_hv_setup_htab_rma()
5216 /* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */ in kvmppc_hv_setup_htab_rma()
5230 * Must be called with kvm->arch.mmu_setup_lock held and
5240 kvm->arch.process_table = 0; in kvmppc_switch_mmu_to_hpt()
5243 kvm->arch.radix = 0; in kvmppc_switch_mmu_to_hpt()
5257 * Must be called with kvm->arch.mmu_setup_lock held and
5271 kvm->arch.radix = 1; in kvmppc_switch_mmu_to_radix()
5273 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_switch_mmu_to_radix()
5280 (kvm->arch.host_lpcr & LPCR_HAIL)) in kvmppc_switch_mmu_to_radix()
5376 mutex_init(&kvm->arch.uvmem_lock); in kvmppc_core_init_vm_hv()
5377 INIT_LIST_HEAD(&kvm->arch.uvmem_pfns); in kvmppc_core_init_vm_hv()
5378 mutex_init(&kvm->arch.mmu_setup_lock); in kvmppc_core_init_vm_hv()
5385 kvm->arch.lpid = lpid; in kvmppc_core_init_vm_hv()
5399 cpumask_setall(&kvm->arch.need_tlb_flush); in kvmppc_core_init_vm_hv()
5402 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls, in kvmppc_core_init_vm_hv()
5403 sizeof(kvm->arch.enabled_hcalls)); in kvmppc_core_init_vm_hv()
5406 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); in kvmppc_core_init_vm_hv()
5410 kvm->arch.host_lpid = mfspr(SPRN_LPID); in kvmppc_core_init_vm_hv()
5411 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); in kvmppc_core_init_vm_hv()
5422 kvm->arch.vrma_slb_v = SLB_VSID_B_1T | in kvmppc_core_init_vm_hv()
5450 kvm->arch.radix = 1; in kvmppc_core_init_vm_hv()
5451 kvm->arch.mmu_ready = 1; in kvmppc_core_init_vm_hv()
5456 (kvm->arch.host_lpcr & LPCR_HAIL)) in kvmppc_core_init_vm_hv()
5460 kvmppc_free_lpid(kvm->arch.lpid); in kvmppc_core_init_vm_hv()
5467 kvm->arch.lpcr = lpcr; in kvmppc_core_init_vm_hv()
5470 kvm->arch.resize_hpt = NULL; in kvmppc_core_init_vm_hv()
5480 kvm->arch.tlb_sets = 1; in kvmppc_core_init_vm_hv()
5482 kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX; /* 128 */ in kvmppc_core_init_vm_hv()
5484 kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */ in kvmppc_core_init_vm_hv()
5486 kvm->arch.tlb_sets = POWER8_TLB_SETS; /* 512 */ in kvmppc_core_init_vm_hv()
5488 kvm->arch.tlb_sets = POWER7_TLB_SETS; /* 128 */ in kvmppc_core_init_vm_hv()
5505 kvm->arch.smt_mode = threads_per_subcore; in kvmppc_core_init_vm_hv()
5507 kvm->arch.smt_mode = 1; in kvmppc_core_init_vm_hv()
5508 kvm->arch.emul_smt_mode = 1; in kvmppc_core_init_vm_hv()
5526 kfree(kvm->arch.vcores[i]); in kvmppc_free_vcores()
5527 kvm->arch.online_vcores = 0; in kvmppc_free_vcores()
5541 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_core_destroy_vm_hv()
5547 kvm->arch.process_table = 0; in kvmppc_core_destroy_vm_hv()
5548 if (kvm->arch.secure_guest) in kvmppc_core_destroy_vm_hv()
5549 uv_svm_terminate(kvm->arch.lpid); in kvmppc_core_destroy_vm_hv()
5550 kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); in kvmppc_core_destroy_vm_hv()
5553 kvmppc_free_lpid(kvm->arch.lpid); in kvmppc_core_destroy_vm_hv()
5594 kfree(kvm->arch.pimap); in kvmppc_free_pimap()
5620 pimap = kvm->arch.pimap; in kvmppc_set_passthru_irq()
5628 kvm->arch.pimap = pimap; in kvmppc_set_passthru_irq()
5712 if (!kvm->arch.pimap) in kvmppc_clr_passthru_irq()
5715 pimap = kvm->arch.pimap; in kvmppc_clr_passthru_irq()
5925 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_configure_mmu()
5927 if (kvm->arch.mmu_ready) { in kvmhv_configure_mmu()
5928 kvm->arch.mmu_ready = 0; in kvmhv_configure_mmu()
5931 if (atomic_read(&kvm->arch.vcpus_running)) { in kvmhv_configure_mmu()
5932 kvm->arch.mmu_ready = 1; in kvmhv_configure_mmu()
5945 kvm->arch.process_table = cfg->process_table; in kvmhv_configure_mmu()
5953 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_configure_mmu()
5968 kvm->arch.nested_enable = true; in kvmhv_enable_nested()
5985 if (rc && vcpu->arch.nested) in kvmhv_load_from_eaddr()
6004 if (rc && vcpu->arch.nested) in kvmhv_store_to_eaddr()
6030 kvm->arch.svm_enabled = 1; in kvmhv_enable_svm()
6050 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmhv_svm_off()
6053 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_svm_off()
6054 mmu_was_ready = kvm->arch.mmu_ready; in kvmhv_svm_off()
6055 if (kvm->arch.mmu_ready) { in kvmhv_svm_off()
6056 kvm->arch.mmu_ready = 0; in kvmhv_svm_off()
6059 if (atomic_read(&kvm->arch.vcpus_running)) { in kvmhv_svm_off()
6060 kvm->arch.mmu_ready = 1; in kvmhv_svm_off()
6077 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); in kvmhv_svm_off()
6082 ret = uv_svm_terminate(kvm->arch.lpid); in kvmhv_svm_off()
6099 spin_lock(&vcpu->arch.vpa_update_lock); in kvmhv_svm_off()
6100 unpin_vpa_reset(kvm, &vcpu->arch.dtl); in kvmhv_svm_off()
6101 unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow); in kvmhv_svm_off()
6102 unpin_vpa_reset(kvm, &vcpu->arch.vpa); in kvmhv_svm_off()
6103 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmhv_svm_off()
6107 kvm->arch.secure_guest = 0; in kvmhv_svm_off()
6108 kvm->arch.mmu_ready = mmu_was_ready; in kvmhv_svm_off()
6110 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_svm_off()
6121 kvm->arch.dawr1_enabled = true; in kvmhv_enable_dawr1()
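
The listing carries no surrounding prose, but the fragments from kvmppc_core_vcpu_load_hv()/kvmppc_core_vcpu_put_hv() (file lines 317-380), together with the comment at file line 277 ("Updates to busy_stolen are protected by arch.tbacct_lock"), outline one recurring pattern: the busy/stolen-time fields are only read or written under arch.tbacct_lock with interrupts disabled. The sketch below is a minimal, hypothetical illustration of that locking discipline only; struct vcpu_tbacct, tbacct_vcpu_load() and tbacct_vcpu_put() are invented simplifications, not the kernel's struct kvm_vcpu, and the real code additionally charges stolen time to the vcore and reads the timebase with mftb().

/*
 * Hypothetical, simplified per-vcpu accounting block. It mirrors only the
 * locking pattern visible in the matches above: busy_stolen/busy_preempt
 * are touched solely under tbacct_lock with IRQs disabled.
 */
#include <linux/types.h>
#include <linux/spinlock.h>

#define TB_NIL	(~(u64)0)	/* sentinel: "not currently preempted" */

struct vcpu_tbacct {
	spinlock_t tbacct_lock;
	u64 busy_stolen;	/* accumulated stolen timebase ticks */
	u64 busy_preempt;	/* timebase value when preempted, or TB_NIL */
};

static void tbacct_init(struct vcpu_tbacct *acct)
{
	spin_lock_init(&acct->tbacct_lock);
	acct->busy_stolen = 0;
	acct->busy_preempt = TB_NIL;
}

/* Scheduled back in on the host: fold the preempted interval into busy_stolen. */
static void tbacct_vcpu_load(struct vcpu_tbacct *acct, u64 now)
{
	unsigned long flags;

	spin_lock_irqsave(&acct->tbacct_lock, flags);
	if (acct->busy_preempt != TB_NIL) {
		acct->busy_stolen += now - acct->busy_preempt;
		acct->busy_preempt = TB_NIL;
	}
	spin_unlock_irqrestore(&acct->tbacct_lock, flags);
}

/* Scheduled out while busy in the host: remember when the preemption started. */
static void tbacct_vcpu_put(struct vcpu_tbacct *acct, u64 now)
{
	unsigned long flags;

	spin_lock_irqsave(&acct->tbacct_lock, flags);
	acct->busy_preempt = now;
	spin_unlock_irqrestore(&acct->tbacct_lock, flags);
}

The same discipline shows up again with vpa_update_lock (file lines 499-712 and 2309-2323): the VPA/DTL/SLB-shadow descriptors are only examined or updated while that spinlock is held, so the register-access and update paths cannot race with H_REGISTER_VPA processing.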