Lines Matching refs:arch
223 cpu = READ_ONCE(vcpu->arch.thread_cpu); in kvmppc_fast_vcpu_kick_hv()
289 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_load_hv()
301 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
302 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST && in kvmppc_core_vcpu_load_hv()
303 vcpu->arch.busy_preempt != TB_NIL) { in kvmppc_core_vcpu_load_hv()
304 vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; in kvmppc_core_vcpu_load_hv()
305 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_load_hv()
307 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
312 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_put_hv()
318 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
319 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) in kvmppc_core_vcpu_put_hv()
320 vcpu->arch.busy_preempt = mftb(); in kvmppc_core_vcpu_put_hv()
321 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
332 vcpu->arch.shregs.msr = msr; in kvmppc_set_msr_hv()
338 vcpu->arch.pvr = pvr; in kvmppc_set_pvr_hv()
347 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_arch_compat()
400 vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap); in kvmppc_dump_regs()
406 vcpu->arch.regs.ctr, vcpu->arch.regs.link); in kvmppc_dump_regs()
408 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); in kvmppc_dump_regs()
410 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); in kvmppc_dump_regs()
412 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); in kvmppc_dump_regs()
414 vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); in kvmppc_dump_regs()
415 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); in kvmppc_dump_regs()
417 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_dump_regs()
418 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max); in kvmppc_dump_regs()
419 for (r = 0; r < vcpu->arch.slb_max; ++r) in kvmppc_dump_regs()
421 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); in kvmppc_dump_regs()
423 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, in kvmppc_dump_regs()
424 vcpu->arch.last_inst); in kvmppc_dump_regs()
449 spin_lock(&vcpu->arch.vpa_update_lock); in set_vpa()
455 spin_unlock(&vcpu->arch.vpa_update_lock); in set_vpa()
518 spin_lock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
531 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
542 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
545 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
552 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
555 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
562 if (vpa_is_registered(&tvcpu->arch.dtl) || in do_h_register_vpa()
563 vpa_is_registered(&tvcpu->arch.slb_shadow)) in do_h_register_vpa()
566 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
571 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
576 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
587 spin_unlock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
609 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
614 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
644 if (!(vcpu->arch.vpa.update_pending || in kvmppc_update_vpas()
645 vcpu->arch.slb_shadow.update_pending || in kvmppc_update_vpas()
646 vcpu->arch.dtl.update_pending)) in kvmppc_update_vpas()
649 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
650 if (vcpu->arch.vpa.update_pending) { in kvmppc_update_vpas()
651 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa); in kvmppc_update_vpas()
652 if (vcpu->arch.vpa.pinned_addr) in kvmppc_update_vpas()
653 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); in kvmppc_update_vpas()
655 if (vcpu->arch.dtl.update_pending) { in kvmppc_update_vpas()
656 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl); in kvmppc_update_vpas()
657 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; in kvmppc_update_vpas()
658 vcpu->arch.dtl_index = 0; in kvmppc_update_vpas()
660 if (vcpu->arch.slb_shadow.update_pending) in kvmppc_update_vpas()
661 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow); in kvmppc_update_vpas()
662 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
693 dt = vcpu->arch.dtl_ptr; in kvmppc_create_dtl_entry()
694 vpa = vcpu->arch.vpa.pinned_addr; in kvmppc_create_dtl_entry()
697 stolen = core_stolen - vcpu->arch.stolen_logged; in kvmppc_create_dtl_entry()
698 vcpu->arch.stolen_logged = core_stolen; in kvmppc_create_dtl_entry()
699 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_create_dtl_entry()
700 stolen += vcpu->arch.busy_stolen; in kvmppc_create_dtl_entry()
701 vcpu->arch.busy_stolen = 0; in kvmppc_create_dtl_entry()
702 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_create_dtl_entry()
707 dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid); in kvmppc_create_dtl_entry()
711 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); in kvmppc_create_dtl_entry()
713 if (dt == vcpu->arch.dtl.pinned_end) in kvmppc_create_dtl_entry()
714 dt = vcpu->arch.dtl.pinned_addr; in kvmppc_create_dtl_entry()
715 vcpu->arch.dtl_ptr = dt; in kvmppc_create_dtl_entry()
718 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); in kvmppc_create_dtl_entry()
719 vcpu->arch.dtl.dirty = true; in kvmppc_create_dtl_entry()
728 if (vcpu->arch.doorbell_request) in kvmppc_doorbell_pending()
737 vc = vcpu->arch.vcore; in kvmppc_doorbell_pending()
744 if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207) in kvmppc_power8_compatible()
746 if ((!vcpu->arch.vcore->arch_compat) && in kvmppc_power8_compatible()
767 vcpu->arch.ciabr = value1; in kvmppc_h_set_mode()
778 vcpu->arch.dawr = value1; in kvmppc_h_set_mode()
779 vcpu->arch.dawrx = value2; in kvmppc_h_set_mode()
788 struct kvmppc_vcore *vcore = target->arch.vcore; in kvm_arch_vcpu_yield_to()
799 if (target->arch.state == KVMPPC_VCPU_RUNNABLE && in kvm_arch_vcpu_yield_to()
813 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
814 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; in kvmppc_get_yield_count()
817 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
830 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls)) in kvmppc_pseries_do_hcall()
843 tvcpu->arch.prodded = 1; in kvmppc_pseries_do_hcall()
845 if (tvcpu->arch.ceded) in kvmppc_pseries_do_hcall()
868 if (list_empty(&vcpu->kvm->arch.rtas_tokens)) in kvmppc_pseries_do_hcall()
942 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
987 run->debug.arch.address = kvmppc_get_pc(vcpu); in kvmppc_emulate_debug_inst()
1005 nthreads = vcpu->kvm->arch.emul_smt_mode; in kvmppc_read_dpdes()
1044 thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1); in kvmppc_emulate_doorbell_instr()
1051 if (arg >= kvm->arch.emul_smt_mode) in kvmppc_emulate_doorbell_instr()
1056 if (!tvcpu->arch.doorbell_request) { in kvmppc_emulate_doorbell_instr()
1057 tvcpu->arch.doorbell_request = 1; in kvmppc_emulate_doorbell_instr()
1065 vcpu->arch.vcore->dpdes = 0; in kvmppc_emulate_doorbell_instr()
1066 vcpu->arch.doorbell_request = 0; in kvmppc_emulate_doorbell_instr()
1104 if (vcpu->arch.shregs.msr & MSR_HV) { in kvmppc_handle_exit_hv()
1107 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
1108 vcpu->arch.shregs.msr); in kvmppc_handle_exit_hv()
1111 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1116 switch (vcpu->arch.trap) { in kvmppc_handle_exit_hv()
1137 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1141 if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED) in kvmppc_handle_exit_hv()
1148 machine_check_print_event_info(&vcpu->arch.mce_evt, false); in kvmppc_handle_exit_hv()
1159 flags = vcpu->arch.shregs.msr & 0x1f0000ull; in kvmppc_handle_exit_hv()
1177 vcpu->arch.hcall_needed = 1; in kvmppc_handle_exit_hv()
1192 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_exit_hv()
1193 vcpu->arch.fault_dsisr = 0; in kvmppc_handle_exit_hv()
1204 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED) in kvmppc_handle_exit_hv()
1205 vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ? in kvmppc_handle_exit_hv()
1206 swab32(vcpu->arch.emul_inst) : in kvmppc_handle_exit_hv()
1207 vcpu->arch.emul_inst; in kvmppc_handle_exit_hv()
1210 spin_unlock(&vcpu->arch.vcore->lock); in kvmppc_handle_exit_hv()
1212 spin_lock(&vcpu->arch.vcore->lock); in kvmppc_handle_exit_hv()
1227 if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) && in kvmppc_handle_exit_hv()
1230 spin_unlock(&vcpu->arch.vcore->lock); in kvmppc_handle_exit_hv()
1232 spin_lock(&vcpu->arch.vcore->lock); in kvmppc_handle_exit_hv()
1258 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
1259 vcpu->arch.shregs.msr); in kvmppc_handle_exit_hv()
1260 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1274 sregs->pvr = vcpu->arch.pvr; in kvm_arch_vcpu_ioctl_get_sregs_hv()
1275 for (i = 0; i < vcpu->arch.slb_max; i++) { in kvm_arch_vcpu_ioctl_get_sregs_hv()
1276 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; in kvm_arch_vcpu_ioctl_get_sregs_hv()
1277 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; in kvm_arch_vcpu_ioctl_get_sregs_hv()
1289 if (sregs->pvr != vcpu->arch.pvr) in kvm_arch_vcpu_ioctl_set_sregs_hv()
1293 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvm_arch_vcpu_ioctl_set_sregs_hv()
1295 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; in kvm_arch_vcpu_ioctl_set_sregs_hv()
1296 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; in kvm_arch_vcpu_ioctl_set_sregs_hv()
1300 vcpu->arch.slb_max = j; in kvm_arch_vcpu_ioctl_set_sregs_hv()
1309 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_lpcr()
1323 if (vcpu->arch.vcore != vc) in kvmppc_set_lpcr()
1326 vcpu->arch.intr_msr |= MSR_LE; in kvmppc_set_lpcr()
1328 vcpu->arch.intr_msr &= ~MSR_LE; in kvmppc_set_lpcr()
1369 *val = get_reg_val(id, vcpu->arch.dabr); in kvmppc_get_one_reg_hv()
1372 *val = get_reg_val(id, vcpu->arch.dabrx); in kvmppc_get_one_reg_hv()
1375 *val = get_reg_val(id, vcpu->arch.dscr); in kvmppc_get_one_reg_hv()
1378 *val = get_reg_val(id, vcpu->arch.purr); in kvmppc_get_one_reg_hv()
1381 *val = get_reg_val(id, vcpu->arch.spurr); in kvmppc_get_one_reg_hv()
1384 *val = get_reg_val(id, vcpu->arch.amr); in kvmppc_get_one_reg_hv()
1387 *val = get_reg_val(id, vcpu->arch.uamor); in kvmppc_get_one_reg_hv()
1391 *val = get_reg_val(id, vcpu->arch.mmcr[i]); in kvmppc_get_one_reg_hv()
1395 *val = get_reg_val(id, vcpu->arch.pmc[i]); in kvmppc_get_one_reg_hv()
1399 *val = get_reg_val(id, vcpu->arch.spmc[i]); in kvmppc_get_one_reg_hv()
1402 *val = get_reg_val(id, vcpu->arch.siar); in kvmppc_get_one_reg_hv()
1405 *val = get_reg_val(id, vcpu->arch.sdar); in kvmppc_get_one_reg_hv()
1408 *val = get_reg_val(id, vcpu->arch.sier); in kvmppc_get_one_reg_hv()
1411 *val = get_reg_val(id, vcpu->arch.iamr); in kvmppc_get_one_reg_hv()
1414 *val = get_reg_val(id, vcpu->arch.pspb); in kvmppc_get_one_reg_hv()
1417 *val = get_reg_val(id, vcpu->arch.vcore->dpdes); in kvmppc_get_one_reg_hv()
1420 *val = get_reg_val(id, vcpu->arch.vcore->vtb); in kvmppc_get_one_reg_hv()
1423 *val = get_reg_val(id, vcpu->arch.dawr); in kvmppc_get_one_reg_hv()
1426 *val = get_reg_val(id, vcpu->arch.dawrx); in kvmppc_get_one_reg_hv()
1429 *val = get_reg_val(id, vcpu->arch.ciabr); in kvmppc_get_one_reg_hv()
1432 *val = get_reg_val(id, vcpu->arch.csigr); in kvmppc_get_one_reg_hv()
1435 *val = get_reg_val(id, vcpu->arch.tacr); in kvmppc_get_one_reg_hv()
1438 *val = get_reg_val(id, vcpu->arch.tcscr); in kvmppc_get_one_reg_hv()
1441 *val = get_reg_val(id, vcpu->arch.pid); in kvmppc_get_one_reg_hv()
1444 *val = get_reg_val(id, vcpu->arch.acop); in kvmppc_get_one_reg_hv()
1447 *val = get_reg_val(id, vcpu->arch.wort); in kvmppc_get_one_reg_hv()
1450 *val = get_reg_val(id, vcpu->arch.tid); in kvmppc_get_one_reg_hv()
1453 *val = get_reg_val(id, vcpu->arch.psscr); in kvmppc_get_one_reg_hv()
1456 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1457 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); in kvmppc_get_one_reg_hv()
1458 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1461 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1462 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; in kvmppc_get_one_reg_hv()
1463 val->vpaval.length = vcpu->arch.slb_shadow.len; in kvmppc_get_one_reg_hv()
1464 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1467 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1468 val->vpaval.addr = vcpu->arch.dtl.next_gpa; in kvmppc_get_one_reg_hv()
1469 val->vpaval.length = vcpu->arch.dtl.len; in kvmppc_get_one_reg_hv()
1470 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1473 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); in kvmppc_get_one_reg_hv()
1477 *val = get_reg_val(id, vcpu->arch.vcore->lpcr); in kvmppc_get_one_reg_hv()
1480 *val = get_reg_val(id, vcpu->arch.ppr); in kvmppc_get_one_reg_hv()
1484 *val = get_reg_val(id, vcpu->arch.tfhar); in kvmppc_get_one_reg_hv()
1487 *val = get_reg_val(id, vcpu->arch.tfiar); in kvmppc_get_one_reg_hv()
1490 *val = get_reg_val(id, vcpu->arch.texasr); in kvmppc_get_one_reg_hv()
1494 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]); in kvmppc_get_one_reg_hv()
1502 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; in kvmppc_get_one_reg_hv()
1505 val->vval = vcpu->arch.vr_tm.vr[i-32]; in kvmppc_get_one_reg_hv()
1512 *val = get_reg_val(id, vcpu->arch.cr_tm); in kvmppc_get_one_reg_hv()
1515 *val = get_reg_val(id, vcpu->arch.xer_tm); in kvmppc_get_one_reg_hv()
1518 *val = get_reg_val(id, vcpu->arch.lr_tm); in kvmppc_get_one_reg_hv()
1521 *val = get_reg_val(id, vcpu->arch.ctr_tm); in kvmppc_get_one_reg_hv()
1524 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); in kvmppc_get_one_reg_hv()
1527 *val = get_reg_val(id, vcpu->arch.amr_tm); in kvmppc_get_one_reg_hv()
1530 *val = get_reg_val(id, vcpu->arch.ppr_tm); in kvmppc_get_one_reg_hv()
1533 *val = get_reg_val(id, vcpu->arch.vrsave_tm); in kvmppc_get_one_reg_hv()
1537 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); in kvmppc_get_one_reg_hv()
1542 *val = get_reg_val(id, vcpu->arch.dscr_tm); in kvmppc_get_one_reg_hv()
1545 *val = get_reg_val(id, vcpu->arch.tar_tm); in kvmppc_get_one_reg_hv()
1549 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); in kvmppc_get_one_reg_hv()
1552 *val = get_reg_val(id, vcpu->arch.dec_expires + in kvmppc_get_one_reg_hv()
1553 vcpu->arch.vcore->tb_offset); in kvmppc_get_one_reg_hv()
1556 *val = get_reg_val(id, vcpu->arch.online); in kvmppc_get_one_reg_hv()
1580 vcpu->arch.dabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1583 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; in kvmppc_set_one_reg_hv()
1586 vcpu->arch.dscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1589 vcpu->arch.purr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1592 vcpu->arch.spurr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1595 vcpu->arch.amr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1598 vcpu->arch.uamor = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1602 vcpu->arch.mmcr[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1606 vcpu->arch.pmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1610 vcpu->arch.spmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1613 vcpu->arch.siar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1616 vcpu->arch.sdar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1619 vcpu->arch.sier = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1622 vcpu->arch.iamr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1625 vcpu->arch.pspb = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1628 vcpu->arch.vcore->dpdes = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1631 vcpu->arch.vcore->vtb = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1634 vcpu->arch.dawr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1637 vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP; in kvmppc_set_one_reg_hv()
1640 vcpu->arch.ciabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1642 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) in kvmppc_set_one_reg_hv()
1643 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ in kvmppc_set_one_reg_hv()
1646 vcpu->arch.csigr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1649 vcpu->arch.tacr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1652 vcpu->arch.tcscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1655 vcpu->arch.pid = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1658 vcpu->arch.acop = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1661 vcpu->arch.wort = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1664 vcpu->arch.tid = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1667 vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS; in kvmppc_set_one_reg_hv()
1672 if (!addr && (vcpu->arch.slb_shadow.next_gpa || in kvmppc_set_one_reg_hv()
1673 vcpu->arch.dtl.next_gpa)) in kvmppc_set_one_reg_hv()
1675 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); in kvmppc_set_one_reg_hv()
1681 if (addr && !vcpu->arch.vpa.next_gpa) in kvmppc_set_one_reg_hv()
1683 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); in kvmppc_set_one_reg_hv()
1690 !vcpu->arch.vpa.next_gpa)) in kvmppc_set_one_reg_hv()
1693 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); in kvmppc_set_one_reg_hv()
1697 vcpu->arch.vcore->tb_offset = in kvmppc_set_one_reg_hv()
1707 vcpu->arch.ppr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1711 vcpu->arch.tfhar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1714 vcpu->arch.tfiar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1717 vcpu->arch.texasr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1721 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1729 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; in kvmppc_set_one_reg_hv()
1732 vcpu->arch.vr_tm.vr[i-32] = val->vval; in kvmppc_set_one_reg_hv()
1738 vcpu->arch.cr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1741 vcpu->arch.xer_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1744 vcpu->arch.lr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1747 vcpu->arch.ctr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1750 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1753 vcpu->arch.amr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1756 vcpu->arch.ppr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1759 vcpu->arch.vrsave_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1763 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1768 vcpu->arch.dscr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1771 vcpu->arch.tar_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1778 vcpu->arch.dec_expires = set_reg_val(id, *val) - in kvmppc_set_one_reg_hv()
1779 vcpu->arch.vcore->tb_offset; in kvmppc_set_one_reg_hv()
1783 if (i && !vcpu->arch.online) in kvmppc_set_one_reg_hv()
1784 atomic_inc(&vcpu->arch.vcore->online_count); in kvmppc_set_one_reg_hv()
1785 else if (!i && vcpu->arch.online) in kvmppc_set_one_reg_hv()
1786 atomic_dec(&vcpu->arch.vcore->online_count); in kvmppc_set_one_reg_hv()
1787 vcpu->arch.online = i; in kvmppc_set_one_reg_hv()
1806 if (kvm->arch.threads_indep) in threads_per_vcore()
1824 vcore->lpcr = kvm->arch.lpcr; in kvmppc_vcore_create()
1837 {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)},
1838 {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)},
1839 {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)},
1840 {"guest", offsetof(struct kvm_vcpu, arch.guest_time)},
1841 {"cede", offsetof(struct kvm_vcpu, arch.cede_time)},
1964 if (IS_ERR_OR_NULL(kvm->arch.debugfs_dir)) in debugfs_vcpu_init()
1966 vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir); in debugfs_vcpu_init()
1967 if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir)) in debugfs_vcpu_init()
1969 vcpu->arch.debugfs_timings = in debugfs_vcpu_init()
1970 debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir, in debugfs_vcpu_init()
1997 vcpu->arch.shared = &vcpu->arch.shregs; in kvmppc_core_vcpu_create_hv()
2004 vcpu->arch.shared_big_endian = true; in kvmppc_core_vcpu_create_hv()
2006 vcpu->arch.shared_big_endian = false; in kvmppc_core_vcpu_create_hv()
2009 vcpu->arch.mmcr[0] = MMCR0_FC; in kvmppc_core_vcpu_create_hv()
2010 vcpu->arch.ctrl = CTRL_RUNLATCH; in kvmppc_core_vcpu_create_hv()
2013 spin_lock_init(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_create_hv()
2014 spin_lock_init(&vcpu->arch.tbacct_lock); in kvmppc_core_vcpu_create_hv()
2015 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_create_hv()
2016 vcpu->arch.intr_msr = MSR_SF | MSR_ME; in kvmppc_core_vcpu_create_hv()
2024 vcpu->arch.hfscr = mfspr(SPRN_HFSCR); in kvmppc_core_vcpu_create_hv()
2026 vcpu->arch.hfscr |= HFSCR_TM; in kvmppc_core_vcpu_create_hv()
2028 vcpu->arch.hfscr &= ~HFSCR_TM; in kvmppc_core_vcpu_create_hv()
2030 vcpu->arch.hfscr &= ~HFSCR_MSGP; in kvmppc_core_vcpu_create_hv()
2034 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_core_vcpu_create_hv()
2036 init_waitqueue_head(&vcpu->arch.cpu_run); in kvmppc_core_vcpu_create_hv()
2042 if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) { in kvmppc_core_vcpu_create_hv()
2046 BUG_ON(kvm->arch.smt_mode != 1); in kvmppc_core_vcpu_create_hv()
2050 core = id / kvm->arch.smt_mode; in kvmppc_core_vcpu_create_hv()
2053 vcore = kvm->arch.vcores[core]; in kvmppc_core_vcpu_create_hv()
2060 id & ~(kvm->arch.smt_mode - 1)); in kvmppc_core_vcpu_create_hv()
2061 kvm->arch.vcores[core] = vcore; in kvmppc_core_vcpu_create_hv()
2062 kvm->arch.online_vcores++; in kvmppc_core_vcpu_create_hv()
2073 vcpu->arch.vcore = vcore; in kvmppc_core_vcpu_create_hv()
2074 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; in kvmppc_core_vcpu_create_hv()
2075 vcpu->arch.thread_cpu = -1; in kvmppc_core_vcpu_create_hv()
2076 vcpu->arch.prev_cpu = -1; in kvmppc_core_vcpu_create_hv()
2078 vcpu->arch.cpu_type = KVM_CPU_3S_64; in kvmppc_core_vcpu_create_hv()
2118 if (!kvm->arch.online_vcores) { in kvmhv_set_smt_mode()
2119 kvm->arch.smt_mode = smt_mode; in kvmhv_set_smt_mode()
2120 kvm->arch.emul_smt_mode = esmt; in kvmhv_set_smt_mode()
2137 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
2138 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); in kvmppc_core_vcpu_free_hv()
2139 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); in kvmppc_core_vcpu_free_hv()
2140 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); in kvmppc_core_vcpu_free_hv()
2141 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
2157 if (now > vcpu->arch.dec_expires) { in kvmppc_set_timer()
2163 dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC in kvmppc_set_timer()
2165 hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL); in kvmppc_set_timer()
2166 vcpu->arch.timer_running = 1; in kvmppc_set_timer()
2171 vcpu->arch.ceded = 0; in kvmppc_end_cede()
2172 if (vcpu->arch.timer_running) { in kvmppc_end_cede()
2173 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_end_cede()
2174 vcpu->arch.timer_running = 0; in kvmppc_end_cede()
2185 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_remove_runnable()
2187 spin_lock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
2189 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - in kvmppc_remove_runnable()
2190 vcpu->arch.stolen_logged; in kvmppc_remove_runnable()
2191 vcpu->arch.busy_preempt = now; in kvmppc_remove_runnable()
2192 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_remove_runnable()
2193 spin_unlock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
2195 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL); in kvmppc_remove_runnable()
2248 cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush); in radix_flush_cpu()
2256 if (cpumask_test_cpu(cpu + i, &kvm->arch.cpu_in_guest)) in radix_flush_cpu()
2276 if (vcpu->arch.prev_cpu != pcpu) { in kvmppc_prepare_radix_vcpu()
2277 if (vcpu->arch.prev_cpu >= 0 && in kvmppc_prepare_radix_vcpu()
2278 cpu_first_thread_sibling(vcpu->arch.prev_cpu) != in kvmppc_prepare_radix_vcpu()
2280 radix_flush_cpu(kvm, vcpu->arch.prev_cpu, vcpu); in kvmppc_prepare_radix_vcpu()
2281 vcpu->arch.prev_cpu = pcpu; in kvmppc_prepare_radix_vcpu()
2293 if (vcpu->arch.timer_running) { in kvmppc_start_thread()
2294 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_start_thread()
2295 vcpu->arch.timer_running = 0; in kvmppc_start_thread()
2297 cpu += vcpu->arch.ptid; in kvmppc_start_thread()
2299 vcpu->arch.thread_cpu = cpu; in kvmppc_start_thread()
2300 cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest); in kvmppc_start_thread()
2537 if (signal_pending(vcpu->arch.run_task)) in prepare_threads()
2538 vcpu->arch.ret = -EINTR; in prepare_threads()
2539 else if (vcpu->arch.vpa.update_pending || in prepare_threads()
2540 vcpu->arch.slb_shadow.update_pending || in prepare_threads()
2541 vcpu->arch.dtl.update_pending) in prepare_threads()
2542 vcpu->arch.ret = RESUME_GUEST; in prepare_threads()
2546 wake_up(&vcpu->arch.cpu_run); in prepare_threads()
2588 if (signal_pending(vcpu->arch.run_task)) in recheck_signals()
2604 if (now < vcpu->arch.dec_expires && in post_guest_process()
2611 if (vcpu->arch.trap) in post_guest_process()
2612 ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu, in post_guest_process()
2613 vcpu->arch.run_task); in post_guest_process()
2615 vcpu->arch.ret = ret; in post_guest_process()
2616 vcpu->arch.trap = 0; in post_guest_process()
2618 if (is_kvmppc_resume_guest(vcpu->arch.ret)) { in post_guest_process()
2619 if (vcpu->arch.pending_exceptions) in post_guest_process()
2621 if (vcpu->arch.ceded) in post_guest_process()
2627 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
2643 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
2739 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_core()
2767 (hpt_on_radix && vc->kvm->arch.threads_indep)) { in kvmppc_run_core()
2769 vcpu->arch.ret = -EBUSY; in kvmppc_run_core()
2771 wake_up(&vcpu->arch.cpu_run); in kvmppc_run_core()
2810 recheck_signals(&core_info) || !vc->kvm->arch.mmu_ready) { in kvmppc_run_core()
2860 split_info.lpidr_req = vc->kvm->arch.lpid; in kvmppc_run_core()
2861 split_info.host_lpcr = vc->kvm->arch.host_lpcr; in kvmppc_run_core()
2925 if (!vcpu->arch.ptid) in kvmppc_run_core()
2927 active |= 1 << (thr + vcpu->arch.ptid); in kvmppc_run_core()
2978 mtspr(SPRN_LPID, vc->kvm->arch.lpid); in kvmppc_run_core()
2984 if (cpumask_test_cpu(tmp, &vc->kvm->arch.need_tlb_flush)) { in kvmppc_run_core()
2985 radix__local_flush_tlb_lpid_guest(vc->kvm->arch.lpid); in kvmppc_run_core()
2987 cpumask_clear_cpu(tmp, &vc->kvm->arch.need_tlb_flush); in kvmppc_run_core()
3060 cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest); in kvmppc_run_core()
3091 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); in kvmppc_wait_for_exec()
3092 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_wait_for_exec()
3097 finish_wait(&vcpu->arch.cpu_run, &wait); in kvmppc_wait_for_exec()
3122 return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr < in xive_interrupt_pending()
3123 vcpu->arch.xive_saved_state.cppr; in xive_interrupt_pending()
3134 if (vcpu->arch.pending_exceptions || vcpu->arch.prodded || in kvmppc_vcpu_woken()
3151 if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu)) in kvmppc_vcore_check_block()
3265 if (!kvm->arch.mmu_ready) { in kvmhv_setup_mmu()
3271 kvm->arch.mmu_ready = 1; in kvmhv_setup_mmu()
3287 vcpu->arch.ret = RESUME_GUEST; in kvmppc_run_vcpu()
3288 vcpu->arch.trap = 0; in kvmppc_run_vcpu()
3294 vc = vcpu->arch.vcore; in kvmppc_run_vcpu()
3296 vcpu->arch.ceded = 0; in kvmppc_run_vcpu()
3297 vcpu->arch.run_task = current; in kvmppc_run_vcpu()
3298 vcpu->arch.kvm_run = kvm_run; in kvmppc_run_vcpu()
3299 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); in kvmppc_run_vcpu()
3300 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmppc_run_vcpu()
3301 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_run_vcpu()
3302 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu); in kvmppc_run_vcpu()
3323 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
3326 if (!vcpu->kvm->arch.mmu_ready) { in kvmppc_run_vcpu()
3334 vcpu->arch.ret = r; in kvmppc_run_vcpu()
3348 if (signal_pending(v->arch.run_task)) { in kvmppc_run_vcpu()
3351 v->arch.kvm_run->exit_reason = KVM_EXIT_INTR; in kvmppc_run_vcpu()
3352 v->arch.ret = -EINTR; in kvmppc_run_vcpu()
3353 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
3356 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_vcpu()
3361 n_ceded += v->arch.ceded; in kvmppc_run_vcpu()
3363 v->arch.ceded = 0; in kvmppc_run_vcpu()
3380 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
3389 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_run_vcpu()
3393 vcpu->arch.ret = -EINTR; in kvmppc_run_vcpu()
3400 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
3405 return vcpu->arch.ret; in kvmppc_run_vcpu()
3417 if (!vcpu->arch.sane) { in kvmppc_vcpu_run_hv()
3449 if (!vcpu->arch.online) { in kvmppc_vcpu_run_hv()
3450 atomic_inc(&vcpu->arch.vcore->online_count); in kvmppc_vcpu_run_hv()
3451 vcpu->arch.online = 1; in kvmppc_vcpu_run_hv()
3463 atomic_inc(&kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
3478 vcpu->arch.wqp = &vcpu->arch.vcore->wq; in kvmppc_vcpu_run_hv()
3479 vcpu->arch.pgdir = current->mm->pgd; in kvmppc_vcpu_run_hv()
3480 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_vcpu_run_hv()
3486 !(vcpu->arch.shregs.msr & MSR_PR)) { in kvmppc_vcpu_run_hv()
3494 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_vcpu_run_hv()
3514 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_vcpu_run_hv()
3515 atomic_dec(&kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
3618 spin_lock(&vcpu->arch.vpa_update_lock); in kvm_vm_ioctl_get_dirty_log_hv()
3619 kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf); in kvm_vm_ioctl_get_dirty_log_hv()
3620 kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf); in kvm_vm_ioctl_get_dirty_log_hv()
3621 spin_unlock(&vcpu->arch.vpa_update_lock); in kvm_vm_ioctl_get_dirty_log_hv()
3637 if (!dont || free->arch.rmap != dont->arch.rmap) { in kvmppc_core_free_memslot_hv()
3638 vfree(free->arch.rmap); in kvmppc_core_free_memslot_hv()
3639 free->arch.rmap = NULL; in kvmppc_core_free_memslot_hv()
3646 slot->arch.rmap = vzalloc(array_size(npages, sizeof(*slot->arch.rmap))); in kvmppc_core_create_memslot_hv()
3647 if (!slot->arch.rmap) in kvmppc_core_create_memslot_hv()
3674 atomic64_inc(&kvm->arch.mmio_update); in kvmppc_core_commit_memory_region_hv()
3686 if ((kvm->arch.lpcr & mask) == lpcr) in kvmppc_update_lpcr()
3689 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
3692 struct kvmppc_vcore *vc = kvm->arch.vcores[i]; in kvmppc_update_lpcr()
3698 if (++cores_done >= kvm->arch.online_vcores) in kvmppc_update_lpcr()
3714 dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) | in kvmppc_setup_partition_table()
3715 ((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1); in kvmppc_setup_partition_table()
3717 dw0 |= kvm->arch.sdr1; in kvmppc_setup_partition_table()
3720 dw1 = kvm->arch.process_table; in kvmppc_setup_partition_table()
3723 __pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE; in kvmppc_setup_partition_table()
3724 dw1 = PATB_GR | kvm->arch.process_table; in kvmppc_setup_partition_table()
3727 mmu_partition_table_set_entry(kvm->arch.lpid, dw0, dw1); in kvmppc_setup_partition_table()
3746 if (!kvm->arch.hpt.virt) { in kvmppc_hv_setup_htab_rma()
3795 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | in kvmppc_hv_setup_htab_rma()
3827 kvm->arch.radix = 0; in kvmppc_switch_mmu_to_hpt()
3828 kvm->arch.process_table = 0; in kvmppc_switch_mmu_to_hpt()
3841 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_switch_mmu_to_radix()
3844 kvm->arch.radix = 1; in kvmppc_switch_mmu_to_radix()
3939 kvm->arch.lpid = lpid; in kvmppc_core_init_vm_hv()
3951 cpumask_setall(&kvm->arch.need_tlb_flush); in kvmppc_core_init_vm_hv()
3954 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls, in kvmppc_core_init_vm_hv()
3955 sizeof(kvm->arch.enabled_hcalls)); in kvmppc_core_init_vm_hv()
3958 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); in kvmppc_core_init_vm_hv()
3961 kvm->arch.host_lpid = mfspr(SPRN_LPID); in kvmppc_core_init_vm_hv()
3962 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); in kvmppc_core_init_vm_hv()
3966 kvm->arch.vrma_slb_v = SLB_VSID_B_1T | in kvmppc_core_init_vm_hv()
3994 kvm->arch.radix = 1; in kvmppc_core_init_vm_hv()
3995 kvm->arch.mmu_ready = 1; in kvmppc_core_init_vm_hv()
4000 kvmppc_free_lpid(kvm->arch.lpid); in kvmppc_core_init_vm_hv()
4006 kvm->arch.lpcr = lpcr; in kvmppc_core_init_vm_hv()
4009 kvm->arch.resize_hpt = NULL; in kvmppc_core_init_vm_hv()
4016 kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX; /* 128 */ in kvmppc_core_init_vm_hv()
4018 kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */ in kvmppc_core_init_vm_hv()
4020 kvm->arch.tlb_sets = POWER8_TLB_SETS; /* 512 */ in kvmppc_core_init_vm_hv()
4022 kvm->arch.tlb_sets = POWER7_TLB_SETS; /* 128 */ in kvmppc_core_init_vm_hv()
4031 kvm->arch.threads_indep = indep_threads_mode; in kvmppc_core_init_vm_hv()
4032 if (!kvm->arch.threads_indep) in kvmppc_core_init_vm_hv()
4043 kvm->arch.smt_mode = threads_per_subcore; in kvmppc_core_init_vm_hv()
4045 kvm->arch.smt_mode = 1; in kvmppc_core_init_vm_hv()
4046 kvm->arch.emul_smt_mode = 1; in kvmppc_core_init_vm_hv()
4052 kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir); in kvmppc_core_init_vm_hv()
4063 kfree(kvm->arch.vcores[i]); in kvmppc_free_vcores()
4064 kvm->arch.online_vcores = 0; in kvmppc_free_vcores()
4069 debugfs_remove_recursive(kvm->arch.debugfs_dir); in kvmppc_core_destroy_vm_hv()
4071 if (!kvm->arch.threads_indep) in kvmppc_core_destroy_vm_hv()
4076 kvmppc_free_lpid(kvm->arch.lpid); in kvmppc_core_destroy_vm_hv()
4081 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_core_destroy_vm_hv()
4118 kfree(kvm->arch.pimap); in kvmppc_free_pimap()
4143 pimap = kvm->arch.pimap; in kvmppc_set_passthru_irq()
4151 kvm->arch.pimap = pimap; in kvmppc_set_passthru_irq()
4228 if (!kvm->arch.pimap) in kvmppc_clr_passthru_irq()
4231 pimap = kvm->arch.pimap; in kvmppc_clr_passthru_irq()
4431 if (kvm->arch.mmu_ready) { in kvmhv_configure_mmu()
4432 kvm->arch.mmu_ready = 0; in kvmhv_configure_mmu()
4435 if (atomic_read(&kvm->arch.vcpus_running)) { in kvmhv_configure_mmu()
4436 kvm->arch.mmu_ready = 1; in kvmhv_configure_mmu()
4449 kvm->arch.process_table = cfg->process_table; in kvmhv_configure_mmu()
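
Two related patterns recur throughout the matches above (which appear to come from arch/powerpc/kvm/book3s_hv.c): the tbacct_lock-guarded busy/stolen-time accounting in kvmppc_core_vcpu_load_hv(), kvmppc_core_vcpu_put_hv() and kvmppc_remove_runnable(), and the dispatch-trace-log ring that kvmppc_create_dtl_entry() fills, wrapping its write pointer at pinned_end while dtl_index keeps counting. The code below is a minimal user-space sketch of both patterns, assuming simplified stand-in types (vcpu_acct, dtl_ring, a fake mftb(), and a pthread mutex in place of the kernel spinlock); it illustrates the shape of the logic, not the kernel's actual implementation.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define TB_NIL (~(uint64_t)0)          /* sentinel: "not preempted" */
#define DTL_ENTRIES 4                  /* tiny ring, just for the demo */

/* Hypothetical stand-ins for the vcpu->arch fields in the listing. */
struct vcpu_acct {
	pthread_mutex_t tbacct_lock;   /* plays the role of arch.tbacct_lock */
	uint64_t busy_stolen;          /* time lost while busy in the host */
	uint64_t busy_preempt;         /* timebase at preemption, or TB_NIL */
	int busy_in_host;              /* ~ state == KVMPPC_VCPU_BUSY_IN_HOST */
};

/* Hypothetical, simplified dispatch-trace-log entry and ring. */
struct dtl_entry { uint64_t timebase; uint64_t stolen; };

struct dtl_ring {
	struct dtl_entry buf[DTL_ENTRIES];
	struct dtl_entry *ptr;         /* next slot to fill (~ arch.dtl_ptr) */
	uint64_t index;                /* ever-increasing (~ arch.dtl_index) */
};

static uint64_t mftb(void)             /* stand-in for the timebase read */
{
	static uint64_t fake_tb;
	return ++fake_tb;
}

/* Load side (cf. kvmppc_core_vcpu_load_hv): fold the preempted
 * interval into busy_stolen and clear the timestamp. */
static void vcpu_load(struct vcpu_acct *v)
{
	pthread_mutex_lock(&v->tbacct_lock);
	if (v->busy_in_host && v->busy_preempt != TB_NIL) {
		v->busy_stolen += mftb() - v->busy_preempt;
		v->busy_preempt = TB_NIL;
	}
	pthread_mutex_unlock(&v->tbacct_lock);
}

/* Put side (cf. kvmppc_core_vcpu_put_hv): timestamp the moment the
 * task loses the physical cpu while still busy in the host. */
static void vcpu_put(struct vcpu_acct *v)
{
	pthread_mutex_lock(&v->tbacct_lock);
	if (v->busy_in_host)
		v->busy_preempt = mftb();
	pthread_mutex_unlock(&v->tbacct_lock);
}

/* Entry creation (cf. kvmppc_create_dtl_entry): drain busy_stolen under
 * the lock, write one entry, then wrap the pointer at the end of the
 * buffer while the index keeps counting. */
static void create_dtl_entry(struct vcpu_acct *v, struct dtl_ring *r)
{
	struct dtl_entry *dt = r->ptr;
	uint64_t stolen;

	pthread_mutex_lock(&v->tbacct_lock);
	stolen = v->busy_stolen;
	v->busy_stolen = 0;
	pthread_mutex_unlock(&v->tbacct_lock);

	dt->timebase = mftb();
	dt->stolen = stolen;
	if (++dt == r->buf + DTL_ENTRIES)
		dt = r->buf;           /* wrap, like pinned_end/pinned_addr */
	r->ptr = dt;
	r->index++;                    /* the kernel stores this in vpa->dtl_idx */
}

int main(void)
{
	struct vcpu_acct v = {
		.tbacct_lock = PTHREAD_MUTEX_INITIALIZER,
		.busy_preempt = TB_NIL,
		.busy_in_host = 1,
	};
	struct dtl_ring r = { .index = 0 };

	r.ptr = r.buf;
	vcpu_put(&v);                  /* preempted while busy in host */
	vcpu_load(&v);                 /* rescheduled: interval counts as stolen */
	create_dtl_entry(&v, &r);
	printf("dtl index %llu, stolen %llu ticks\n",
	       (unsigned long long)r.index,
	       (unsigned long long)r.buf[0].stolen);
	return 0;
}

The split between a wrapping write pointer and an ever-increasing index is the design point worth noting: a consumer that remembers the last index it read can tell how many entries it missed even after the pointer has lapped it, which a bare ring pointer cannot convey.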