
1 // SPDX-License-Identifier: GPL-2.0-only
9 * Kevin Wolf <mail@kevin-wolf.de>
32 #include <linux/page-flags.h>
49 #include <asm/ppc-opcode.h>
50 #include <asm/asm-prototypes.h>
72 #include <asm/pnv-pci.h>
105 MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4,…
167 vcpu = READ_ONCE(vc->runnable_threads[i]); in next_runnable_thread()
178 for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )
212 if (paca_ptrs[cpu]->kvm_hstate.xics_phys) { in kvmppc_ipi_thread()
238 ++vcpu->stat.generic.halt_wakeup; in kvmppc_fast_vcpu_kick_hv()
240 cpu = READ_ONCE(vcpu->arch.thread_cpu); in kvmppc_fast_vcpu_kick_hv()
245 cpu = vcpu->cpu; in kvmppc_fast_vcpu_kick_hv()
264 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
278 * updates to vc->stolen_tb are protected by the vcore->stoltb_lock
296 spin_lock_irqsave(&vc->stoltb_lock, flags); in kvmppc_core_start_stolen()
297 vc->preempt_tb = tb; in kvmppc_core_start_stolen()
298 spin_unlock_irqrestore(&vc->stoltb_lock, flags); in kvmppc_core_start_stolen()
307 spin_lock_irqsave(&vc->stoltb_lock, flags); in kvmppc_core_end_stolen()
308 if (vc->preempt_tb != TB_NIL) { in kvmppc_core_end_stolen()
309 vc->stolen_tb += tb - vc->preempt_tb; in kvmppc_core_end_stolen()
310 vc->preempt_tb = TB_NIL; in kvmppc_core_end_stolen()
312 spin_unlock_irqrestore(&vc->stoltb_lock, flags); in kvmppc_core_end_stolen()
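
The accounting described in the comment above reduces to two counters per vcore. A minimal stand-alone model (not the kernel code; MODEL_TB_NIL and the struct/function names are invented for illustration, mirroring TB_NIL, stolen_tb and preempt_tb):

#define MODEL_TB_NIL	(~(u64)0)	/* sentinel mirroring the kernel's TB_NIL */

struct vcore_stolen_model {
	u64 stolen_tb;			/* accumulated stolen timebase ticks */
	u64 preempt_tb;			/* TB at preemption, or MODEL_TB_NIL while running */
};

static u64 model_stolen_so_far(struct vcore_stolen_model *v, u64 now)
{
	u64 p = v->stolen_tb;

	if (v->preempt_tb != MODEL_TB_NIL)	/* still preempted: count up to now */
		p += now - v->preempt_tb;
	return p;
}

The real code does the same arithmetic under vc->stoltb_lock, as the kvmppc_core_start_stolen()/kvmppc_core_end_stolen() excerpts above show.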
317 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_load_hv()
322 if (vcpu->arch.busy_preempt != TB_NIL) { in kvmppc_core_vcpu_load_hv()
323 WARN_ON_ONCE(vcpu->arch.state != KVMPPC_VCPU_BUSY_IN_HOST); in kvmppc_core_vcpu_load_hv()
324 vc->stolen_tb += mftb() - vcpu->arch.busy_preempt; in kvmppc_core_vcpu_load_hv()
325 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_load_hv()
333 * We can test vc->runner without taking the vcore lock, in kvmppc_core_vcpu_load_hv()
334 * because only this task ever sets vc->runner to this in kvmppc_core_vcpu_load_hv()
338 if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) in kvmppc_core_vcpu_load_hv()
341 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
342 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST && in kvmppc_core_vcpu_load_hv()
343 vcpu->arch.busy_preempt != TB_NIL) { in kvmppc_core_vcpu_load_hv()
344 vcpu->arch.busy_stolen += now - vcpu->arch.busy_preempt; in kvmppc_core_vcpu_load_hv()
345 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_load_hv()
347 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
352 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_put_hv()
361 WARN_ON_ONCE(vcpu->arch.state == KVMPPC_VCPU_RUNNABLE); in kvmppc_core_vcpu_put_hv()
367 vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) in kvmppc_core_vcpu_put_hv()
368 vcpu->arch.busy_preempt = mftb(); in kvmppc_core_vcpu_put_hv()
374 if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) in kvmppc_core_vcpu_put_hv()
377 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
378 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) in kvmppc_core_vcpu_put_hv()
379 vcpu->arch.busy_preempt = now; in kvmppc_core_vcpu_put_hv()
380 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
385 vcpu->arch.pvr = pvr; in kvmppc_set_pvr_hv()
394 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_arch_compat()
429 return -EINVAL; in kvmppc_set_arch_compat()
435 return -EINVAL; in kvmppc_set_arch_compat()
437 spin_lock(&vc->lock); in kvmppc_set_arch_compat()
438 vc->arch_compat = arch_compat; in kvmppc_set_arch_compat()
443 vc->pcr = (host_pcr_bit - guest_pcr_bit) | PCR_MASK; in kvmppc_set_arch_compat()
444 spin_unlock(&vc->lock); in kvmppc_set_arch_compat()
453 pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id); in kvmppc_dump_regs()
455 vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap); in kvmppc_dump_regs()
461 vcpu->arch.regs.ctr, vcpu->arch.regs.link); in kvmppc_dump_regs()
463 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); in kvmppc_dump_regs()
465 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); in kvmppc_dump_regs()
467 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); in kvmppc_dump_regs()
469 vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); in kvmppc_dump_regs()
470 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); in kvmppc_dump_regs()
472 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_dump_regs()
473 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max); in kvmppc_dump_regs()
474 for (r = 0; r < vcpu->arch.slb_max; ++r) in kvmppc_dump_regs()
476 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); in kvmppc_dump_regs()
478 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, in kvmppc_dump_regs()
479 vcpu->arch.last_inst); in kvmppc_dump_regs()
489 vpa->__old_status |= LPPACA_OLD_SHARED_PROC; in init_vpa()
490 vpa->yield_count = cpu_to_be32(1); in init_vpa()
497 if (addr & (L1_CACHE_BYTES - 1)) in set_vpa()
498 return -EINVAL; in set_vpa()
499 spin_lock(&vcpu->arch.vpa_update_lock); in set_vpa()
500 if (v->next_gpa != addr || v->len != len) { in set_vpa()
501 v->next_gpa = addr; in set_vpa()
502 v->len = addr ? len : 0; in set_vpa()
503 v->update_pending = 1; in set_vpa()
505 spin_unlock(&vcpu->arch.vpa_update_lock); in set_vpa()
509 /* Length for a per-processor buffer is passed in at offset 4 in the buffer */
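
The layout implied by this comment and by the length accesses in do_h_register_vpa() below is, roughly, a 4-byte header followed by a length that the guest may supply as either a 16-bit or a 32-bit big-endian value. A sketch (only the casts to struct reg_vpa below confirm the general shape; the field names here are assumptions):

struct reg_vpa {
	u32 dummy;			/* first 4 bytes of the buffer, ignored here */
	union {				/* length lives at offset 4 */
		__be16 hword;		/* 16-bit length variant */
		__be32 word;		/* 32-bit length variant */
	} length;
};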
520 if (vpap->update_pending) in vpa_is_registered()
521 return vpap->next_gpa != 0; in vpa_is_registered()
522 return vpap->pinned_addr != NULL; in vpa_is_registered()
529 struct kvm *kvm = vcpu->kvm; in do_h_register_vpa()
544 /* Registering new area - address must be cache-line aligned */ in do_h_register_vpa()
545 if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa) in do_h_register_vpa()
553 len = be16_to_cpu(((struct reg_vpa *)va)->length.hword); in do_h_register_vpa()
555 len = be32_to_cpu(((struct reg_vpa *)va)->length.word); in do_h_register_vpa()
568 spin_lock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
581 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
588 len -= len % sizeof(struct dtl_entry); in do_h_register_vpa()
592 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
595 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
602 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
605 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
612 if (vpa_is_registered(&tvcpu->arch.dtl) || in do_h_register_vpa()
613 vpa_is_registered(&tvcpu->arch.slb_shadow)) in do_h_register_vpa()
616 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
621 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
626 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
632 vpap->next_gpa = vpa; in do_h_register_vpa()
633 vpap->len = len; in do_h_register_vpa()
634 vpap->update_pending = 1; in do_h_register_vpa()
637 spin_unlock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
644 struct kvm *kvm = vcpu->kvm; in kvmppc_update_vpa()
650 * We need to pin the page pointed to by vpap->next_gpa, in kvmppc_update_vpa()
658 gpa = vpap->next_gpa; in kvmppc_update_vpa()
659 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
664 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
665 if (gpa == vpap->next_gpa) in kvmppc_update_vpa()
672 vpap->update_pending = 0; in kvmppc_update_vpa()
673 if (va && nb < vpap->len) { in kvmppc_update_vpa()
682 if (vpap->pinned_addr) in kvmppc_update_vpa()
683 kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa, in kvmppc_update_vpa()
684 vpap->dirty); in kvmppc_update_vpa()
685 vpap->gpa = gpa; in kvmppc_update_vpa()
686 vpap->pinned_addr = va; in kvmppc_update_vpa()
687 vpap->dirty = false; in kvmppc_update_vpa()
689 vpap->pinned_end = va + vpap->len; in kvmppc_update_vpa()
694 if (!(vcpu->arch.vpa.update_pending || in kvmppc_update_vpas()
695 vcpu->arch.slb_shadow.update_pending || in kvmppc_update_vpas()
696 vcpu->arch.dtl.update_pending)) in kvmppc_update_vpas()
699 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
700 if (vcpu->arch.vpa.update_pending) { in kvmppc_update_vpas()
701 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa); in kvmppc_update_vpas()
702 if (vcpu->arch.vpa.pinned_addr) in kvmppc_update_vpas()
703 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); in kvmppc_update_vpas()
705 if (vcpu->arch.dtl.update_pending) { in kvmppc_update_vpas()
706 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl); in kvmppc_update_vpas()
707 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; in kvmppc_update_vpas()
708 vcpu->arch.dtl_index = 0; in kvmppc_update_vpas()
710 if (vcpu->arch.slb_shadow.update_pending) in kvmppc_update_vpas()
711 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow); in kvmppc_update_vpas()
712 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
726 spin_lock_irqsave(&vc->stoltb_lock, flags); in vcore_stolen_time()
727 p = vc->stolen_tb; in vcore_stolen_time()
728 if (vc->vcore_state != VCORE_INACTIVE && in vcore_stolen_time()
729 vc->preempt_tb != TB_NIL) in vcore_stolen_time()
730 p += now - vc->preempt_tb; in vcore_stolen_time()
731 spin_unlock_irqrestore(&vc->stoltb_lock, flags); in vcore_stolen_time()
742 dt = vcpu->arch.dtl_ptr; in __kvmppc_create_dtl_entry()
747 dt->dispatch_reason = 7; in __kvmppc_create_dtl_entry()
748 dt->preempt_reason = 0; in __kvmppc_create_dtl_entry()
749 dt->processor_id = cpu_to_be16(pcpu + vcpu->arch.ptid); in __kvmppc_create_dtl_entry()
750 dt->enqueue_to_dispatch_time = cpu_to_be32(stolen); in __kvmppc_create_dtl_entry()
751 dt->ready_to_enqueue_time = 0; in __kvmppc_create_dtl_entry()
752 dt->waiting_to_ready_time = 0; in __kvmppc_create_dtl_entry()
753 dt->timebase = cpu_to_be64(now); in __kvmppc_create_dtl_entry()
754 dt->fault_addr = 0; in __kvmppc_create_dtl_entry()
755 dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu)); in __kvmppc_create_dtl_entry()
756 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); in __kvmppc_create_dtl_entry()
759 if (dt == vcpu->arch.dtl.pinned_end) in __kvmppc_create_dtl_entry()
760 dt = vcpu->arch.dtl.pinned_addr; in __kvmppc_create_dtl_entry()
761 vcpu->arch.dtl_ptr = dt; in __kvmppc_create_dtl_entry()
762 /* order writing *dt vs. writing vpa->dtl_idx */ in __kvmppc_create_dtl_entry()
764 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); in __kvmppc_create_dtl_entry()
766 /* vcpu->arch.dtl.dirty is set by the caller */ in __kvmppc_create_dtl_entry()
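
The barrier noted in the "order writing *dt vs. writing vpa->dtl_idx" comment above implements a publish/consume pattern: the DTL entry must be visible before the index that advertises it. A hedged sketch of both sides, where dtl_publish() condenses the host code above and dtl_consume() is an assumed model of the guest reader, not code from this file:

static void dtl_publish(struct dtl_entry *dt, struct lppaca *vpa, u64 *index)
{
	/* ... fill in all fields of *dt ... */
	smp_wmb();				/* entry contents before the index */
	vpa->dtl_idx = cpu_to_be64(++(*index));
}

static u64 dtl_consume(struct lppaca *vpa)
{
	u64 idx = be64_to_cpu(vpa->dtl_idx);

	smp_rmb();				/* pairs with the producer's smp_wmb() */
	/* entries up to idx are now guaranteed to be fully written */
	return idx;
}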
778 vpa = vcpu->arch.vpa.pinned_addr; in kvmppc_update_vpa_dispatch()
785 stolen = core_stolen - vcpu->arch.stolen_logged; in kvmppc_update_vpa_dispatch()
786 vcpu->arch.stolen_logged = core_stolen; in kvmppc_update_vpa_dispatch()
787 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_update_vpa_dispatch()
788 stolen += vcpu->arch.busy_stolen; in kvmppc_update_vpa_dispatch()
789 vcpu->arch.busy_stolen = 0; in kvmppc_update_vpa_dispatch()
790 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_update_vpa_dispatch()
792 vpa->enqueue_dispatch_tb = cpu_to_be64(be64_to_cpu(vpa->enqueue_dispatch_tb) + stolen); in kvmppc_update_vpa_dispatch()
794 __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now + vc->tb_offset, stolen); in kvmppc_update_vpa_dispatch()
796 vcpu->arch.vpa.dirty = true; in kvmppc_update_vpa_dispatch()
807 vpa = vcpu->arch.vpa.pinned_addr; in kvmppc_update_vpa_dispatch_p9()
811 stolen = vc->stolen_tb; in kvmppc_update_vpa_dispatch_p9()
812 stolen_delta = stolen - vcpu->arch.stolen_logged; in kvmppc_update_vpa_dispatch_p9()
813 vcpu->arch.stolen_logged = stolen; in kvmppc_update_vpa_dispatch_p9()
815 vpa->enqueue_dispatch_tb = cpu_to_be64(stolen); in kvmppc_update_vpa_dispatch_p9()
817 __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now, stolen_delta); in kvmppc_update_vpa_dispatch_p9()
819 vcpu->arch.vpa.dirty = true; in kvmppc_update_vpa_dispatch_p9()
828 if (vcpu->arch.doorbell_request) in kvmppc_doorbell_pending()
833 * Ensure that the read of vcore->dpdes comes after the read in kvmppc_doorbell_pending()
834 * of vcpu->doorbell_request. This barrier matches the in kvmppc_doorbell_pending()
838 vc = vcpu->arch.vcore; in kvmppc_doorbell_pending()
839 thr = vcpu->vcpu_id - vc->first_vcpuid; in kvmppc_doorbell_pending()
840 return !!(vc->dpdes & (1 << thr)); in kvmppc_doorbell_pending()
845 if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207) in kvmppc_power8_compatible()
847 if ((!vcpu->arch.vcore->arch_compat) && in kvmppc_power8_compatible()
868 vcpu->arch.ciabr = value1; in kvmppc_h_set_mode()
879 vcpu->arch.dawr0 = value1; in kvmppc_h_set_mode()
880 vcpu->arch.dawrx0 = value2; in kvmppc_h_set_mode()
889 if (!vcpu->kvm->arch.dawr1_enabled) in kvmppc_h_set_mode()
895 vcpu->arch.dawr1 = value1; in kvmppc_h_set_mode()
896 vcpu->arch.dawrx1 = value2; in kvmppc_h_set_mode()
912 /* Copy guest memory in place - must reside within a single memslot */
924 return -EFAULT; in kvmppc_copy_guest()
925 if ((from + len) >= ((from_memslot->base_gfn + from_memslot->npages) in kvmppc_copy_guest()
927 return -EINVAL; in kvmppc_copy_guest()
930 return -EFAULT; in kvmppc_copy_guest()
931 from_addr |= (from & (PAGE_SIZE - 1)); in kvmppc_copy_guest()
936 return -EFAULT; in kvmppc_copy_guest()
937 if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages) in kvmppc_copy_guest()
939 return -EINVAL; in kvmppc_copy_guest()
942 return -EFAULT; in kvmppc_copy_guest()
943 to_addr |= (to & (PAGE_SIZE - 1)); in kvmppc_copy_guest()
949 return -EFAULT; in kvmppc_copy_guest()
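
The bounds checks above enforce the single-memslot requirement noted in the comment before kvmppc_copy_guest(). A hypothetical helper expressing the same idea (the function name is invented; base_gfn and npages are the kvm_memory_slot fields used above):

static bool gpa_range_in_memslot(const struct kvm_memory_slot *slot,
				 gpa_t gpa, unsigned long len)
{
	gpa_t start = (gpa_t)slot->base_gfn << PAGE_SHIFT;
	gpa_t end = (gpa_t)(slot->base_gfn + slot->npages) << PAGE_SHIFT;

	return gpa >= start && gpa + len <= end;
}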
958 u64 pg_mask = SZ_4K - 1; in kvmppc_h_page_init()
972 ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz); in kvmppc_h_page_init()
976 ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz); in kvmppc_h_page_init()
988 struct kvmppc_vcore *vcore = target->arch.vcore; in kvm_arch_vcpu_yield_to()
998 * mode handler is not called but no other threads are in the in kvm_arch_vcpu_yield_to()
1002 spin_lock(&vcore->lock); in kvm_arch_vcpu_yield_to()
1003 if (target->arch.state == KVMPPC_VCPU_RUNNABLE && in kvm_arch_vcpu_yield_to()
1004 vcore->vcore_state != VCORE_INACTIVE && in kvm_arch_vcpu_yield_to()
1005 vcore->runner) in kvm_arch_vcpu_yield_to()
1006 target = vcore->runner; in kvm_arch_vcpu_yield_to()
1007 spin_unlock(&vcore->lock); in kvm_arch_vcpu_yield_to()
1018 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
1019 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; in kvmppc_get_yield_count()
1021 yield_count = be32_to_cpu(lppaca->yield_count); in kvmppc_get_yield_count()
1022 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
1029 * Handles only nested process-scoped invalidation requests in L0.
1037 * The partition-scoped invalidations aren't handled here in L0. in kvmppc_nested_h_rpt_invalidate()
1047 do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid, in kvmppc_nested_h_rpt_invalidate()
1059 if (!kvm_is_radix(vcpu->kvm)) in kvmppc_h_rpt_invalidate()
1066 * Partition-scoped invalidation for nested guests. in kvmppc_h_rpt_invalidate()
1069 if (!nesting_enabled(vcpu->kvm)) in kvmppc_h_rpt_invalidate()
1081 * Process-scoped invalidation for L1 guests. in kvmppc_h_rpt_invalidate()
1083 do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid, in kvmppc_h_rpt_invalidate()
1090 struct kvm *kvm = vcpu->kvm; in kvmppc_pseries_do_hcall()
1098 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls)) in kvmppc_pseries_do_hcall()
1157 tvcpu->arch.prodded = 1; in kvmppc_pseries_do_hcall()
1159 if (tvcpu->arch.ceded) in kvmppc_pseries_do_hcall()
1164 if (target == -1) in kvmppc_pseries_do_hcall()
1182 if (list_empty(&kvm->arch.rtas_tokens)) in kvmppc_pseries_do_hcall()
1185 idx = srcu_read_lock(&kvm->srcu); in kvmppc_pseries_do_hcall()
1187 srcu_read_unlock(&kvm->srcu, idx); in kvmppc_pseries_do_hcall()
1189 if (rc == -ENOENT) in kvmppc_pseries_do_hcall()
1268 if (!arch_get_random_seed_longs(&vcpu->arch.regs.gpr[4], 1)) in kvmppc_pseries_do_hcall()
1292 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1293 return -EINTR; in kvmppc_pseries_do_hcall()
1296 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1346 * Instead the kvm->arch.secure_guest flag is checked inside in kvmppc_pseries_do_hcall()
1357 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1362 * Handle H_CEDE in the P9 path where we don't call the real-mode hcall
1370 vcpu->arch.shregs.msr |= MSR_EE; in kvmppc_cede()
1371 vcpu->arch.ceded = 1; in kvmppc_cede()
1373 if (vcpu->arch.prodded) { in kvmppc_cede()
1374 vcpu->arch.prodded = 0; in kvmppc_cede()
1376 vcpu->arch.ceded = 0; in kvmppc_cede()
1409 /* See if it's in the real-mode table */ in kvmppc_hcall_impl_hv()
1427 vcpu->run->exit_reason = KVM_EXIT_DEBUG; in kvmppc_emulate_debug_inst()
1428 vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu); in kvmppc_emulate_debug_inst()
1446 nthreads = vcpu->kvm->arch.emul_smt_mode; in kvmppc_read_dpdes()
1448 cpu = vcpu->vcpu_id & ~(nthreads - 1); in kvmppc_read_dpdes()
1450 v = kvmppc_find_vcpu(vcpu->kvm, cpu); in kvmppc_read_dpdes()
1456 * which will update its vcore->dpdes value. in kvmppc_read_dpdes()
1458 pcpu = READ_ONCE(v->cpu); in kvmppc_read_dpdes()
1468 * On POWER9, emulate doorbell-related instructions in order to
1469 * give the guest the illusion of running on a multi-threaded core.
1477 struct kvm *kvm = vcpu->kvm; in kvmppc_emulate_doorbell_instr()
1485 thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1); in kvmppc_emulate_doorbell_instr()
1492 if (arg >= kvm->arch.emul_smt_mode) in kvmppc_emulate_doorbell_instr()
1494 tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg); in kvmppc_emulate_doorbell_instr()
1497 if (!tvcpu->arch.doorbell_request) { in kvmppc_emulate_doorbell_instr()
1498 tvcpu->arch.doorbell_request = 1; in kvmppc_emulate_doorbell_instr()
1506 vcpu->arch.vcore->dpdes = 0; in kvmppc_emulate_doorbell_instr()
1507 vcpu->arch.doorbell_request = 0; in kvmppc_emulate_doorbell_instr()
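
The vcpu-id arithmetic in kvmppc_emulate_doorbell_instr() above maps a msgsndp thread number onto a sibling vcpu of the same emulated core. A hypothetical helper with a worked example (the function name is invented; the masking matches the code above):

static int doorbell_target_vcpu_id(int vcpu_id, int emul_smt_mode, int arg)
{
	int thr = vcpu_id & (emul_smt_mode - 1);	/* thread within the emulated core */

	return vcpu_id - thr + arg;	/* e.g. vcpu 5, SMT-4, arg 3 -> vcpu 7 */
}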
1538 if (!(vcpu->arch.hfscr_permitted & HFSCR_PM)) in kvmppc_pmu_unavailable()
1541 vcpu->arch.hfscr |= HFSCR_PM; in kvmppc_pmu_unavailable()
1548 if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB)) in kvmppc_ebb_unavailable()
1551 vcpu->arch.hfscr |= HFSCR_EBB; in kvmppc_ebb_unavailable()
1558 if (!(vcpu->arch.hfscr_permitted & HFSCR_TM)) in kvmppc_tm_unavailable()
1561 vcpu->arch.hfscr |= HFSCR_TM; in kvmppc_tm_unavailable()
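
The three handlers above share one demand-fault pattern for HFSCR facilities: the bit starts clear, the first guest access traps, and the bit is turned on only if the facility was permitted when the vcpu was created (see the hfscr_permitted setup further down). A hypothetical generalisation, using the same return codes and fields as the handlers above:

static int demand_fault_hfscr_facility(struct kvm_vcpu *vcpu, u64 fac_bit)
{
	if (!(vcpu->arch.hfscr_permitted & fac_bit))
		return EMULATE_FAIL;		/* facility was never granted to this guest */

	vcpu->arch.hfscr |= fac_bit;		/* enable it and re-enter the guest */
	return RESUME_GUEST;
}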
1569 struct kvm_run *run = vcpu->run; in kvmppc_handle_exit_hv()
1572 vcpu->stat.sum_exits++; in kvmppc_handle_exit_hv()
1577 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV in kvmppc_handle_exit_hv()
1582 if (vcpu->arch.shregs.msr & MSR_HV) { in kvmppc_handle_exit_hv()
1585 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
1586 vcpu->arch.shregs.msr); in kvmppc_handle_exit_hv()
1588 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvmppc_handle_exit_hv()
1589 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1592 run->exit_reason = KVM_EXIT_UNKNOWN; in kvmppc_handle_exit_hv()
1593 run->ready_for_interrupt_injection = 1; in kvmppc_handle_exit_hv()
1594 switch (vcpu->arch.trap) { in kvmppc_handle_exit_hv()
1595 /* We're good on these - the host merely wanted to get our attention */ in kvmppc_handle_exit_hv()
1598 vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER; in kvmppc_handle_exit_hv()
1601 vcpu->stat.dec_exits++; in kvmppc_handle_exit_hv()
1607 vcpu->stat.ext_intr_exits++; in kvmppc_handle_exit_hv()
1624 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); in kvmppc_handle_exit_hv()
1632 if (!vcpu->kvm->arch.fwnmi_enabled) { in kvmppc_handle_exit_hv()
1633 ulong flags = vcpu->arch.shregs.msr & 0x083c0000; in kvmppc_handle_exit_hv()
1640 run->exit_reason = KVM_EXIT_NMI; in kvmppc_handle_exit_hv()
1641 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1642 /* Clear out the old NMI status from run->flags */ in kvmppc_handle_exit_hv()
1643 run->flags &= ~KVM_RUN_PPC_NMI_DISP_MASK; in kvmppc_handle_exit_hv()
1645 if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED) in kvmppc_handle_exit_hv()
1646 run->flags |= KVM_RUN_PPC_NMI_DISP_FULLY_RECOV; in kvmppc_handle_exit_hv()
1648 run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV; in kvmppc_handle_exit_hv()
1662 flags = vcpu->arch.shregs.msr & 0x1f0000ull; in kvmppc_handle_exit_hv()
1671 if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) { in kvmppc_handle_exit_hv()
1698 * hcall - gather args and set exit_reason. This will next be in kvmppc_handle_exit_hv()
1702 run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3); in kvmppc_handle_exit_hv()
1704 run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i); in kvmppc_handle_exit_hv()
1705 run->exit_reason = KVM_EXIT_PAPR_HCALL; in kvmppc_handle_exit_hv()
1706 vcpu->arch.hcall_needed = 1; in kvmppc_handle_exit_hv()
1725 unlikely(vcpu->arch.fault_dsisr == HDSISR_CANARY)) { in kvmppc_handle_exit_hv()
1730 if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) { in kvmppc_handle_exit_hv()
1732 * Radix doesn't require anything, and pre-ISAv3.0 hash in kvmppc_handle_exit_hv()
1741 if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) { in kvmppc_handle_exit_hv()
1743 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_handle_exit_hv()
1748 if (!(vcpu->arch.shregs.msr & MSR_DR)) in kvmppc_handle_exit_hv()
1749 vsid = vcpu->kvm->arch.vrma_slb_v; in kvmppc_handle_exit_hv()
1751 vsid = vcpu->arch.fault_gpa; in kvmppc_handle_exit_hv()
1753 err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, in kvmppc_handle_exit_hv()
1754 vsid, vcpu->arch.fault_dsisr, true); in kvmppc_handle_exit_hv()
1757 } else if (err == -1 || err == -2) { in kvmppc_handle_exit_hv()
1761 vcpu->arch.fault_dar, err); in kvmppc_handle_exit_hv()
1770 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_exit_hv()
1771 vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr & in kvmppc_handle_exit_hv()
1773 if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) { in kvmppc_handle_exit_hv()
1775 * Radix doesn't require anything, and pre-ISAv3.0 hash in kvmppc_handle_exit_hv()
1780 if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) in kvmppc_handle_exit_hv()
1781 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; in kvmppc_handle_exit_hv()
1786 if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) { in kvmppc_handle_exit_hv()
1788 vcpu->arch.fault_dsisr); in kvmppc_handle_exit_hv()
1793 if (!(vcpu->arch.shregs.msr & MSR_IR)) in kvmppc_handle_exit_hv()
1794 vsid = vcpu->kvm->arch.vrma_slb_v; in kvmppc_handle_exit_hv()
1796 vsid = vcpu->arch.fault_gpa; in kvmppc_handle_exit_hv()
1798 err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, in kvmppc_handle_exit_hv()
1799 vsid, vcpu->arch.fault_dsisr, false); in kvmppc_handle_exit_hv()
1802 } else if (err == -1) { in kvmppc_handle_exit_hv()
1819 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED) in kvmppc_handle_exit_hv()
1820 vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ? in kvmppc_handle_exit_hv()
1821 swab32(vcpu->arch.emul_inst) : in kvmppc_handle_exit_hv()
1822 vcpu->arch.emul_inst; in kvmppc_handle_exit_hv()
1823 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { in kvmppc_handle_exit_hv()
1834 * This occurs for various TM-related instructions that in kvmppc_handle_exit_hv()
1836 * handled the cases where the guest was in real-suspend in kvmppc_handle_exit_hv()
1840 if (r != -1) in kvmppc_handle_exit_hv()
1853 u64 cause = vcpu->arch.hfscr >> 56; in kvmppc_handle_exit_hv()
1879 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
1880 vcpu->arch.shregs.msr); in kvmppc_handle_exit_hv()
1881 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1894 vcpu->stat.sum_exits++; in kvmppc_handle_nested_exit()
1899 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV in kvmppc_handle_nested_exit()
1904 if (vcpu->arch.shregs.msr & MSR_HV) { in kvmppc_handle_nested_exit()
1907 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_nested_exit()
1908 vcpu->arch.shregs.msr); in kvmppc_handle_nested_exit()
1912 switch (vcpu->arch.trap) { in kvmppc_handle_nested_exit()
1913 /* We're good on these - the host merely wanted to get our attention */ in kvmppc_handle_nested_exit()
1915 vcpu->stat.dec_exits++; in kvmppc_handle_nested_exit()
1919 vcpu->stat.ext_intr_exits++; in kvmppc_handle_nested_exit()
1924 vcpu->stat.ext_intr_exits++; in kvmppc_handle_nested_exit()
1929 vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER; in kvmppc_handle_nested_exit()
1930 vcpu->stat.dec_exits++; in kvmppc_handle_nested_exit()
1947 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); in kvmppc_handle_nested_exit()
1957 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_nested_exit()
1959 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvmppc_handle_nested_exit()
1962 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_nested_exit()
1963 vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) & in kvmppc_handle_nested_exit()
1965 if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) in kvmppc_handle_nested_exit()
1966 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; in kvmppc_handle_nested_exit()
1967 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_nested_exit()
1969 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvmppc_handle_nested_exit()
1975 * This occurs for various TM-related instructions that in kvmppc_handle_nested_exit()
1977 * handled the cases where the guest was in real-suspend in kvmppc_handle_nested_exit()
1981 if (r != -1) in kvmppc_handle_nested_exit()
1987 u64 cause = vcpu->arch.hfscr >> 56; in kvmppc_handle_nested_exit()
1995 if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) || in kvmppc_handle_nested_exit()
1996 (vcpu->arch.nested_hfscr & (1UL << cause))) { in kvmppc_handle_nested_exit()
1997 vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST; in kvmppc_handle_nested_exit()
2004 &vcpu->arch.emul_inst); in kvmppc_handle_nested_exit()
2017 vcpu->arch.trap = 0; in kvmppc_handle_nested_exit()
2028 * guests for process-scoped invalidations when in kvmppc_handle_nested_exit()
2053 sregs->pvr = vcpu->arch.pvr; in kvm_arch_vcpu_ioctl_get_sregs_hv()
2054 for (i = 0; i < vcpu->arch.slb_max; i++) { in kvm_arch_vcpu_ioctl_get_sregs_hv()
2055 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; in kvm_arch_vcpu_ioctl_get_sregs_hv()
2056 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; in kvm_arch_vcpu_ioctl_get_sregs_hv()
2068 if (sregs->pvr != vcpu->arch.pvr) in kvm_arch_vcpu_ioctl_set_sregs_hv()
2069 return -EINVAL; in kvm_arch_vcpu_ioctl_set_sregs_hv()
2072 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvm_arch_vcpu_ioctl_set_sregs_hv()
2073 if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) { in kvm_arch_vcpu_ioctl_set_sregs_hv()
2074 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; in kvm_arch_vcpu_ioctl_set_sregs_hv()
2075 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; in kvm_arch_vcpu_ioctl_set_sregs_hv()
2079 vcpu->arch.slb_max = j; in kvm_arch_vcpu_ioctl_set_sregs_hv()
2130 struct kvm *kvm = vcpu->kvm; in kvmppc_set_lpcr()
2131 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_lpcr()
2134 spin_lock(&vc->lock); in kvmppc_set_lpcr()
2138 * DPFD (default prefetch depth), ILE (interrupt little-endian), in kvmppc_set_lpcr()
2145 /* Broken 32-bit version of LPCR must not clear top bits */ in kvmppc_set_lpcr()
2150 (vc->lpcr & ~mask) | (new_lpcr & mask)); in kvmppc_set_lpcr()
2153 * If ILE (interrupt little-endian) has changed, update the in kvmppc_set_lpcr()
2156 if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { in kvmppc_set_lpcr()
2161 if (vcpu->arch.vcore != vc) in kvmppc_set_lpcr()
2164 vcpu->arch.intr_msr |= MSR_LE; in kvmppc_set_lpcr()
2166 vcpu->arch.intr_msr &= ~MSR_LE; in kvmppc_set_lpcr()
2170 vc->lpcr = new_lpcr; in kvmppc_set_lpcr()
2172 spin_unlock(&vc->lock); in kvmppc_set_lpcr()
2189 *val = get_reg_val(id, vcpu->arch.dabr); in kvmppc_get_one_reg_hv()
2192 *val = get_reg_val(id, vcpu->arch.dabrx); in kvmppc_get_one_reg_hv()
2195 *val = get_reg_val(id, vcpu->arch.dscr); in kvmppc_get_one_reg_hv()
2198 *val = get_reg_val(id, vcpu->arch.purr); in kvmppc_get_one_reg_hv()
2201 *val = get_reg_val(id, vcpu->arch.spurr); in kvmppc_get_one_reg_hv()
2204 *val = get_reg_val(id, vcpu->arch.amr); in kvmppc_get_one_reg_hv()
2207 *val = get_reg_val(id, vcpu->arch.uamor); in kvmppc_get_one_reg_hv()
2210 i = id - KVM_REG_PPC_MMCR0; in kvmppc_get_one_reg_hv()
2211 *val = get_reg_val(id, vcpu->arch.mmcr[i]); in kvmppc_get_one_reg_hv()
2214 *val = get_reg_val(id, vcpu->arch.mmcr[2]); in kvmppc_get_one_reg_hv()
2217 *val = get_reg_val(id, vcpu->arch.mmcra); in kvmppc_get_one_reg_hv()
2220 *val = get_reg_val(id, vcpu->arch.mmcrs); in kvmppc_get_one_reg_hv()
2223 *val = get_reg_val(id, vcpu->arch.mmcr[3]); in kvmppc_get_one_reg_hv()
2226 i = id - KVM_REG_PPC_PMC1; in kvmppc_get_one_reg_hv()
2227 *val = get_reg_val(id, vcpu->arch.pmc[i]); in kvmppc_get_one_reg_hv()
2230 i = id - KVM_REG_PPC_SPMC1; in kvmppc_get_one_reg_hv()
2231 *val = get_reg_val(id, vcpu->arch.spmc[i]); in kvmppc_get_one_reg_hv()
2234 *val = get_reg_val(id, vcpu->arch.siar); in kvmppc_get_one_reg_hv()
2237 *val = get_reg_val(id, vcpu->arch.sdar); in kvmppc_get_one_reg_hv()
2240 *val = get_reg_val(id, vcpu->arch.sier[0]); in kvmppc_get_one_reg_hv()
2243 *val = get_reg_val(id, vcpu->arch.sier[1]); in kvmppc_get_one_reg_hv()
2246 *val = get_reg_val(id, vcpu->arch.sier[2]); in kvmppc_get_one_reg_hv()
2249 *val = get_reg_val(id, vcpu->arch.iamr); in kvmppc_get_one_reg_hv()
2252 *val = get_reg_val(id, vcpu->arch.pspb); in kvmppc_get_one_reg_hv()
2258 * either vcore->dpdes or doorbell_request. in kvmppc_get_one_reg_hv()
2262 *val = get_reg_val(id, vcpu->arch.doorbell_request); in kvmppc_get_one_reg_hv()
2264 *val = get_reg_val(id, vcpu->arch.vcore->dpdes); in kvmppc_get_one_reg_hv()
2267 *val = get_reg_val(id, vcpu->arch.vcore->vtb); in kvmppc_get_one_reg_hv()
2270 *val = get_reg_val(id, vcpu->arch.dawr0); in kvmppc_get_one_reg_hv()
2273 *val = get_reg_val(id, vcpu->arch.dawrx0); in kvmppc_get_one_reg_hv()
2276 *val = get_reg_val(id, vcpu->arch.dawr1); in kvmppc_get_one_reg_hv()
2279 *val = get_reg_val(id, vcpu->arch.dawrx1); in kvmppc_get_one_reg_hv()
2282 *val = get_reg_val(id, vcpu->arch.ciabr); in kvmppc_get_one_reg_hv()
2285 *val = get_reg_val(id, vcpu->arch.csigr); in kvmppc_get_one_reg_hv()
2288 *val = get_reg_val(id, vcpu->arch.tacr); in kvmppc_get_one_reg_hv()
2291 *val = get_reg_val(id, vcpu->arch.tcscr); in kvmppc_get_one_reg_hv()
2294 *val = get_reg_val(id, vcpu->arch.pid); in kvmppc_get_one_reg_hv()
2297 *val = get_reg_val(id, vcpu->arch.acop); in kvmppc_get_one_reg_hv()
2300 *val = get_reg_val(id, vcpu->arch.wort); in kvmppc_get_one_reg_hv()
2303 *val = get_reg_val(id, vcpu->arch.tid); in kvmppc_get_one_reg_hv()
2306 *val = get_reg_val(id, vcpu->arch.psscr); in kvmppc_get_one_reg_hv()
2309 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2310 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); in kvmppc_get_one_reg_hv()
2311 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2314 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2315 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; in kvmppc_get_one_reg_hv()
2316 val->vpaval.length = vcpu->arch.slb_shadow.len; in kvmppc_get_one_reg_hv()
2317 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2320 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2321 val->vpaval.addr = vcpu->arch.dtl.next_gpa; in kvmppc_get_one_reg_hv()
2322 val->vpaval.length = vcpu->arch.dtl.len; in kvmppc_get_one_reg_hv()
2323 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2326 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); in kvmppc_get_one_reg_hv()
2330 *val = get_reg_val(id, vcpu->arch.vcore->lpcr); in kvmppc_get_one_reg_hv()
2333 *val = get_reg_val(id, vcpu->arch.ppr); in kvmppc_get_one_reg_hv()
2337 *val = get_reg_val(id, vcpu->arch.tfhar); in kvmppc_get_one_reg_hv()
2340 *val = get_reg_val(id, vcpu->arch.tfiar); in kvmppc_get_one_reg_hv()
2343 *val = get_reg_val(id, vcpu->arch.texasr); in kvmppc_get_one_reg_hv()
2346 i = id - KVM_REG_PPC_TM_GPR0; in kvmppc_get_one_reg_hv()
2347 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]); in kvmppc_get_one_reg_hv()
2352 i = id - KVM_REG_PPC_TM_VSR0; in kvmppc_get_one_reg_hv()
2355 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; in kvmppc_get_one_reg_hv()
2358 val->vval = vcpu->arch.vr_tm.vr[i-32]; in kvmppc_get_one_reg_hv()
2360 r = -ENXIO; in kvmppc_get_one_reg_hv()
2365 *val = get_reg_val(id, vcpu->arch.cr_tm); in kvmppc_get_one_reg_hv()
2368 *val = get_reg_val(id, vcpu->arch.xer_tm); in kvmppc_get_one_reg_hv()
2371 *val = get_reg_val(id, vcpu->arch.lr_tm); in kvmppc_get_one_reg_hv()
2374 *val = get_reg_val(id, vcpu->arch.ctr_tm); in kvmppc_get_one_reg_hv()
2377 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); in kvmppc_get_one_reg_hv()
2380 *val = get_reg_val(id, vcpu->arch.amr_tm); in kvmppc_get_one_reg_hv()
2383 *val = get_reg_val(id, vcpu->arch.ppr_tm); in kvmppc_get_one_reg_hv()
2386 *val = get_reg_val(id, vcpu->arch.vrsave_tm); in kvmppc_get_one_reg_hv()
2390 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); in kvmppc_get_one_reg_hv()
2392 r = -ENXIO; in kvmppc_get_one_reg_hv()
2395 *val = get_reg_val(id, vcpu->arch.dscr_tm); in kvmppc_get_one_reg_hv()
2398 *val = get_reg_val(id, vcpu->arch.tar_tm); in kvmppc_get_one_reg_hv()
2402 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); in kvmppc_get_one_reg_hv()
2405 *val = get_reg_val(id, vcpu->arch.dec_expires); in kvmppc_get_one_reg_hv()
2408 *val = get_reg_val(id, vcpu->arch.online); in kvmppc_get_one_reg_hv()
2411 *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr); in kvmppc_get_one_reg_hv()
2414 r = -EINVAL; in kvmppc_get_one_reg_hv()
2432 r = -EINVAL; in kvmppc_set_one_reg_hv()
2435 vcpu->arch.dabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2438 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; in kvmppc_set_one_reg_hv()
2441 vcpu->arch.dscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2444 vcpu->arch.purr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2447 vcpu->arch.spurr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2450 vcpu->arch.amr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2453 vcpu->arch.uamor = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2456 i = id - KVM_REG_PPC_MMCR0; in kvmppc_set_one_reg_hv()
2457 vcpu->arch.mmcr[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2460 vcpu->arch.mmcr[2] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2463 vcpu->arch.mmcra = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2466 vcpu->arch.mmcrs = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2469 *val = get_reg_val(id, vcpu->arch.mmcr[3]); in kvmppc_set_one_reg_hv()
2472 i = id - KVM_REG_PPC_PMC1; in kvmppc_set_one_reg_hv()
2473 vcpu->arch.pmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2476 i = id - KVM_REG_PPC_SPMC1; in kvmppc_set_one_reg_hv()
2477 vcpu->arch.spmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2480 vcpu->arch.siar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2483 vcpu->arch.sdar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2486 vcpu->arch.sier[0] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2489 vcpu->arch.sier[1] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2492 vcpu->arch.sier[2] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2495 vcpu->arch.iamr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2498 vcpu->arch.pspb = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2502 vcpu->arch.doorbell_request = set_reg_val(id, *val) & 1; in kvmppc_set_one_reg_hv()
2504 vcpu->arch.vcore->dpdes = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2507 vcpu->arch.vcore->vtb = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2510 vcpu->arch.dawr0 = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2513 vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP; in kvmppc_set_one_reg_hv()
2516 vcpu->arch.dawr1 = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2519 vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP; in kvmppc_set_one_reg_hv()
2522 vcpu->arch.ciabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2524 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) in kvmppc_set_one_reg_hv()
2525 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ in kvmppc_set_one_reg_hv()
2528 vcpu->arch.csigr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2531 vcpu->arch.tacr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2534 vcpu->arch.tcscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2537 vcpu->arch.pid = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2540 vcpu->arch.acop = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2543 vcpu->arch.wort = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2546 vcpu->arch.tid = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2549 vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS; in kvmppc_set_one_reg_hv()
2553 r = -EINVAL; in kvmppc_set_one_reg_hv()
2554 if (!addr && (vcpu->arch.slb_shadow.next_gpa || in kvmppc_set_one_reg_hv()
2555 vcpu->arch.dtl.next_gpa)) in kvmppc_set_one_reg_hv()
2557 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); in kvmppc_set_one_reg_hv()
2560 addr = val->vpaval.addr; in kvmppc_set_one_reg_hv()
2561 len = val->vpaval.length; in kvmppc_set_one_reg_hv()
2562 r = -EINVAL; in kvmppc_set_one_reg_hv()
2563 if (addr && !vcpu->arch.vpa.next_gpa) in kvmppc_set_one_reg_hv()
2565 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); in kvmppc_set_one_reg_hv()
2568 addr = val->vpaval.addr; in kvmppc_set_one_reg_hv()
2569 len = val->vpaval.length; in kvmppc_set_one_reg_hv()
2570 r = -EINVAL; in kvmppc_set_one_reg_hv()
2572 !vcpu->arch.vpa.next_gpa)) in kvmppc_set_one_reg_hv()
2574 len -= len % sizeof(struct dtl_entry); in kvmppc_set_one_reg_hv()
2575 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); in kvmppc_set_one_reg_hv()
2590 if (!vcpu->arch.dec_expires && tb_offset) in kvmppc_set_one_reg_hv()
2591 vcpu->arch.dec_expires = get_tb() + tb_offset; in kvmppc_set_one_reg_hv()
2593 vcpu->arch.vcore->tb_offset = tb_offset; in kvmppc_set_one_reg_hv()
2603 vcpu->arch.ppr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2607 vcpu->arch.tfhar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2610 vcpu->arch.tfiar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2613 vcpu->arch.texasr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2616 i = id - KVM_REG_PPC_TM_GPR0; in kvmppc_set_one_reg_hv()
2617 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2622 i = id - KVM_REG_PPC_TM_VSR0; in kvmppc_set_one_reg_hv()
2625 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; in kvmppc_set_one_reg_hv()
2628 vcpu->arch.vr_tm.vr[i-32] = val->vval; in kvmppc_set_one_reg_hv()
2630 r = -ENXIO; in kvmppc_set_one_reg_hv()
2634 vcpu->arch.cr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2637 vcpu->arch.xer_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2640 vcpu->arch.lr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2643 vcpu->arch.ctr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2646 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2649 vcpu->arch.amr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2652 vcpu->arch.ppr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2655 vcpu->arch.vrsave_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2659 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2661 r = -ENXIO; in kvmppc_set_one_reg_hv()
2664 vcpu->arch.dscr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2667 vcpu->arch.tar_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2674 vcpu->arch.dec_expires = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2678 if (i && !vcpu->arch.online) in kvmppc_set_one_reg_hv()
2679 atomic_inc(&vcpu->arch.vcore->online_count); in kvmppc_set_one_reg_hv()
2680 else if (!i && vcpu->arch.online) in kvmppc_set_one_reg_hv()
2681 atomic_dec(&vcpu->arch.vcore->online_count); in kvmppc_set_one_reg_hv()
2682 vcpu->arch.online = i; in kvmppc_set_one_reg_hv()
2685 vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2688 r = -EINVAL; in kvmppc_set_one_reg_hv()
2718 spin_lock_init(&vcore->lock); in kvmppc_vcore_create()
2719 spin_lock_init(&vcore->stoltb_lock); in kvmppc_vcore_create()
2720 rcuwait_init(&vcore->wait); in kvmppc_vcore_create()
2721 vcore->preempt_tb = TB_NIL; in kvmppc_vcore_create()
2722 vcore->lpcr = kvm->arch.lpcr; in kvmppc_vcore_create()
2723 vcore->first_vcpuid = id; in kvmppc_vcore_create()
2724 vcore->kvm = kvm; in kvmppc_vcore_create()
2725 INIT_LIST_HEAD(&vcore->preempt_list); in kvmppc_vcore_create()
2762 struct kvm_vcpu *vcpu = inode->i_private; in debugfs_timings_open()
2767 return -ENOMEM; in debugfs_timings_open()
2769 kvm_get_kvm(vcpu->kvm); in debugfs_timings_open()
2770 p->vcpu = vcpu; in debugfs_timings_open()
2771 file->private_data = p; in debugfs_timings_open()
2778 struct debugfs_timings_state *p = file->private_data; in debugfs_timings_release()
2780 kvm_put_kvm(p->vcpu->kvm); in debugfs_timings_release()
2788 struct debugfs_timings_state *p = file->private_data; in debugfs_timings_read()
2789 struct kvm_vcpu *vcpu = p->vcpu; in debugfs_timings_read()
2798 if (!p->buflen) { in debugfs_timings_read()
2799 s = p->buf; in debugfs_timings_read()
2800 buf_end = s + sizeof(p->buf); in debugfs_timings_read()
2808 count = acc->seqcount; in debugfs_timings_read()
2813 if (count == acc->seqcount) { in debugfs_timings_read()
2821 snprintf(s, buf_end - s, "%s: stuck\n", in debugfs_timings_read()
2824 snprintf(s, buf_end - s, in debugfs_timings_read()
2832 p->buflen = s - p->buf; in debugfs_timings_read()
2836 if (pos >= p->buflen) in debugfs_timings_read()
2838 if (len > p->buflen - pos) in debugfs_timings_read()
2839 len = p->buflen - pos; in debugfs_timings_read()
2840 n = copy_to_user(buf, p->buf + pos, len); in debugfs_timings_read()
2843 return -EFAULT; in debugfs_timings_read()
2844 len -= n; in debugfs_timings_read()
2853 return -EACCES; in debugfs_timings_write()
2889 kvm = vcpu->kvm; in kvmppc_core_vcpu_create_hv()
2890 id = vcpu->vcpu_id; in kvmppc_core_vcpu_create_hv()
2892 vcpu->arch.shared = &vcpu->arch.shregs; in kvmppc_core_vcpu_create_hv()
2899 vcpu->arch.shared_big_endian = true; in kvmppc_core_vcpu_create_hv()
2901 vcpu->arch.shared_big_endian = false; in kvmppc_core_vcpu_create_hv()
2904 vcpu->arch.mmcr[0] = MMCR0_FC; in kvmppc_core_vcpu_create_hv()
2906 vcpu->arch.mmcr[0] |= MMCR0_PMCCEXT; in kvmppc_core_vcpu_create_hv()
2907 vcpu->arch.mmcra = MMCRA_BHRB_DISABLE; in kvmppc_core_vcpu_create_hv()
2910 vcpu->arch.ctrl = CTRL_RUNLATCH; in kvmppc_core_vcpu_create_hv()
2913 spin_lock_init(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_create_hv()
2914 spin_lock_init(&vcpu->arch.tbacct_lock); in kvmppc_core_vcpu_create_hv()
2915 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_create_hv()
2916 vcpu->arch.shregs.msr = MSR_ME; in kvmppc_core_vcpu_create_hv()
2917 vcpu->arch.intr_msr = MSR_SF | MSR_ME; in kvmppc_core_vcpu_create_hv()
2926 vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB | in kvmppc_core_vcpu_create_hv()
2929 vcpu->arch.hfscr &= mfspr(SPRN_HFSCR); in kvmppc_core_vcpu_create_hv()
2932 vcpu->arch.hfscr |= HFSCR_TM; in kvmppc_core_vcpu_create_hv()
2936 vcpu->arch.hfscr |= HFSCR_TM; in kvmppc_core_vcpu_create_hv()
2938 vcpu->arch.hfscr_permitted = vcpu->arch.hfscr; in kvmppc_core_vcpu_create_hv()
2941 * PM, EBB, TM are demand-faulted so start with it clear. in kvmppc_core_vcpu_create_hv()
2943 vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM); in kvmppc_core_vcpu_create_hv()
2947 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_core_vcpu_create_hv()
2949 init_waitqueue_head(&vcpu->arch.cpu_run); in kvmppc_core_vcpu_create_hv()
2951 mutex_lock(&kvm->lock); in kvmppc_core_vcpu_create_hv()
2953 err = -EINVAL; in kvmppc_core_vcpu_create_hv()
2955 if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) { in kvmppc_core_vcpu_create_hv()
2959 BUG_ON(kvm->arch.smt_mode != 1); in kvmppc_core_vcpu_create_hv()
2963 core = id / kvm->arch.smt_mode; in kvmppc_core_vcpu_create_hv()
2966 vcore = kvm->arch.vcores[core]; in kvmppc_core_vcpu_create_hv()
2975 err = -ENOMEM; in kvmppc_core_vcpu_create_hv()
2977 id & ~(kvm->arch.smt_mode - 1)); in kvmppc_core_vcpu_create_hv()
2978 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmppc_core_vcpu_create_hv()
2979 kvm->arch.vcores[core] = vcore; in kvmppc_core_vcpu_create_hv()
2980 kvm->arch.online_vcores++; in kvmppc_core_vcpu_create_hv()
2981 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmppc_core_vcpu_create_hv()
2984 mutex_unlock(&kvm->lock); in kvmppc_core_vcpu_create_hv()
2989 spin_lock(&vcore->lock); in kvmppc_core_vcpu_create_hv()
2990 ++vcore->num_threads; in kvmppc_core_vcpu_create_hv()
2991 spin_unlock(&vcore->lock); in kvmppc_core_vcpu_create_hv()
2992 vcpu->arch.vcore = vcore; in kvmppc_core_vcpu_create_hv()
2993 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; in kvmppc_core_vcpu_create_hv()
2994 vcpu->arch.thread_cpu = -1; in kvmppc_core_vcpu_create_hv()
2995 vcpu->arch.prev_cpu = -1; in kvmppc_core_vcpu_create_hv()
2997 vcpu->arch.cpu_type = KVM_CPU_3S_64; in kvmppc_core_vcpu_create_hv()
3010 return -EINVAL; in kvmhv_set_smt_mode()
3012 return -EINVAL; in kvmhv_set_smt_mode()
3019 return -EINVAL; in kvmhv_set_smt_mode()
3028 mutex_lock(&kvm->lock); in kvmhv_set_smt_mode()
3029 err = -EBUSY; in kvmhv_set_smt_mode()
3030 if (!kvm->arch.online_vcores) { in kvmhv_set_smt_mode()
3031 kvm->arch.smt_mode = smt_mode; in kvmhv_set_smt_mode()
3032 kvm->arch.emul_smt_mode = esmt; in kvmhv_set_smt_mode()
3035 mutex_unlock(&kvm->lock); in kvmhv_set_smt_mode()
3042 if (vpa->pinned_addr) in unpin_vpa()
3043 kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa, in unpin_vpa()
3044 vpa->dirty); in unpin_vpa()
3049 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
3050 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); in kvmppc_core_vcpu_free_hv()
3051 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); in kvmppc_core_vcpu_free_hv()
3052 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); in kvmppc_core_vcpu_free_hv()
3053 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
3073 dec_nsec = tb_to_ns(kvmppc_dec_expires_host_tb(vcpu) - now); in kvmppc_set_timer()
3074 hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL); in kvmppc_set_timer()
3075 vcpu->arch.timer_running = 1; in kvmppc_set_timer()
3085 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_remove_runnable()
3087 spin_lock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
3089 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - in kvmppc_remove_runnable()
3090 vcpu->arch.stolen_logged; in kvmppc_remove_runnable()
3091 vcpu->arch.busy_preempt = now; in kvmppc_remove_runnable()
3092 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_remove_runnable()
3093 spin_unlock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
3094 --vc->n_runnable; in kvmppc_remove_runnable()
3095 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL); in kvmppc_remove_runnable()
3106 tpaca->kvm_hstate.kvm_vcpu = NULL; in kvmppc_grab_hwthread()
3107 tpaca->kvm_hstate.kvm_vcore = NULL; in kvmppc_grab_hwthread()
3108 tpaca->kvm_hstate.napping = 0; in kvmppc_grab_hwthread()
3110 tpaca->kvm_hstate.hwthread_req = 1; in kvmppc_grab_hwthread()
3122 while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) { in kvmppc_grab_hwthread()
3123 if (--timeout <= 0) { in kvmppc_grab_hwthread()
3125 return -EBUSY; in kvmppc_grab_hwthread()
3137 tpaca->kvm_hstate.hwthread_req = 0; in kvmppc_release_hwthread()
3138 tpaca->kvm_hstate.kvm_vcpu = NULL; in kvmppc_release_hwthread()
3139 tpaca->kvm_hstate.kvm_vcore = NULL; in kvmppc_release_hwthread()
3140 tpaca->kvm_hstate.kvm_split_mode = NULL; in kvmppc_release_hwthread()
3147 struct kvm_nested_guest *nested = vcpu->arch.nested; in radix_flush_cpu()
3152 need_tlb_flush = &nested->need_tlb_flush; in radix_flush_cpu()
3154 need_tlb_flush = &kvm->arch.need_tlb_flush; in radix_flush_cpu()
3182 struct kvm *kvm = vcpu->kvm; in do_migrate_away_vcpu()
3192 if (kvm->arch.lpcr & LPCR_GTSE) in do_migrate_away_vcpu()
3200 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmppc_prepare_radix_vcpu()
3201 struct kvm *kvm = vcpu->kvm; in kvmppc_prepare_radix_vcpu()
3208 prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id]; in kvmppc_prepare_radix_vcpu()
3210 prev_cpu = vcpu->arch.prev_cpu; in kvmppc_prepare_radix_vcpu()
3233 nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu; in kvmppc_prepare_radix_vcpu()
3235 vcpu->arch.prev_cpu = pcpu; in kvmppc_prepare_radix_vcpu()
3244 cpu = vc->pcpu; in kvmppc_start_thread()
3246 if (vcpu->arch.timer_running) { in kvmppc_start_thread()
3247 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_start_thread()
3248 vcpu->arch.timer_running = 0; in kvmppc_start_thread()
3250 cpu += vcpu->arch.ptid; in kvmppc_start_thread()
3251 vcpu->cpu = vc->pcpu; in kvmppc_start_thread()
3252 vcpu->arch.thread_cpu = cpu; in kvmppc_start_thread()
3255 tpaca->kvm_hstate.kvm_vcpu = vcpu; in kvmppc_start_thread()
3256 tpaca->kvm_hstate.ptid = cpu - vc->pcpu; in kvmppc_start_thread()
3257 tpaca->kvm_hstate.fake_suspend = 0; in kvmppc_start_thread()
3260 tpaca->kvm_hstate.kvm_vcore = vc; in kvmppc_start_thread()
3277 * for any threads that still have a non-NULL vcore ptr. in kvmppc_wait_for_nap()
3280 if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore) in kvmppc_wait_for_nap()
3290 if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore) in kvmppc_wait_for_nap()
3296 * this core are off-line. Then grab the threads so they can't
3319 } while (--thr > 0); in on_primary_thread()
3344 spin_lock_init(&lp->lock); in init_vcore_lists()
3345 INIT_LIST_HEAD(&lp->list); in init_vcore_lists()
3355 vc->vcore_state = VCORE_PREEMPT; in kvmppc_vcore_preempt()
3356 vc->pcpu = smp_processor_id(); in kvmppc_vcore_preempt()
3357 if (vc->num_threads < threads_per_vcore(vc->kvm)) { in kvmppc_vcore_preempt()
3358 spin_lock(&lp->lock); in kvmppc_vcore_preempt()
3359 list_add_tail(&vc->preempt_list, &lp->list); in kvmppc_vcore_preempt()
3360 spin_unlock(&lp->lock); in kvmppc_vcore_preempt()
3374 if (!list_empty(&vc->preempt_list)) { in kvmppc_vcore_end_preempt()
3375 lp = &per_cpu(preempted_vcores, vc->pcpu); in kvmppc_vcore_end_preempt()
3376 spin_lock(&lp->lock); in kvmppc_vcore_end_preempt()
3377 list_del_init(&vc->preempt_list); in kvmppc_vcore_end_preempt()
3378 spin_unlock(&lp->lock); in kvmppc_vcore_end_preempt()
3380 vc->vcore_state = VCORE_INACTIVE; in kvmppc_vcore_end_preempt()
3396 * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7
3397 * respectively in 2-way micro-threading (split-core) mode on POWER8.
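
A hypothetical helper making the mapping above concrete, assuming MAX_SMT_THREADS is the 8 hardware threads of a POWER8 core: an N-way split gives each subcore 8/N consecutive threads, so in 2-way mode subcore 1 starts at thread 4 and in 4-way mode subcore 3 starts at thread 6.

static inline int subcore_thread_base(int subcore, int split)
{
	return subcore * (MAX_SMT_THREADS / split);	/* 2-way: 0 -> 0, 1 -> 4 */
}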
3404 cip->n_subcores = 1; in init_core_info()
3405 cip->max_subcore_threads = vc->num_threads; in init_core_info()
3406 cip->total_threads = vc->num_threads; in init_core_info()
3407 cip->subcore_threads[0] = vc->num_threads; in init_core_info()
3408 cip->vc[0] = vc; in init_core_info()
3414 * POWER9 "SMT4" cores are permanently in what is effectively a 4-way in subcore_config_ok()
3415 * split-core mode, with one thread per subcore. in subcore_config_ok()
3437 vc->entry_exit_map = 0; in init_vcore_to_run()
3438 vc->in_guest = 0; in init_vcore_to_run()
3439 vc->napping_threads = 0; in init_vcore_to_run()
3440 vc->conferring_threads = 0; in init_vcore_to_run()
3441 vc->tb_offset_applied = 0; in init_vcore_to_run()
3446 int n_threads = vc->num_threads; in can_dynamic_split()
3453 if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm) in can_dynamic_split()
3456 if (n_threads < cip->max_subcore_threads) in can_dynamic_split()
3457 n_threads = cip->max_subcore_threads; in can_dynamic_split()
3458 if (!subcore_config_ok(cip->n_subcores + 1, n_threads)) in can_dynamic_split()
3460 cip->max_subcore_threads = n_threads; in can_dynamic_split()
3462 sub = cip->n_subcores; in can_dynamic_split()
3463 ++cip->n_subcores; in can_dynamic_split()
3464 cip->total_threads += vc->num_threads; in can_dynamic_split()
3465 cip->subcore_threads[sub] = vc->num_threads; in can_dynamic_split()
3466 cip->vc[sub] = vc; in can_dynamic_split()
3468 list_del_init(&vc->preempt_list); in can_dynamic_split()
3480 if (cip->total_threads + pvc->num_threads > target_threads) in can_piggyback()
3492 if (signal_pending(vcpu->arch.run_task)) in prepare_threads()
3493 vcpu->arch.ret = -EINTR; in prepare_threads()
3494 else if (vcpu->arch.vpa.update_pending || in prepare_threads()
3495 vcpu->arch.slb_shadow.update_pending || in prepare_threads()
3496 vcpu->arch.dtl.update_pending) in prepare_threads()
3497 vcpu->arch.ret = RESUME_GUEST; in prepare_threads()
3501 wake_up(&vcpu->arch.cpu_run); in prepare_threads()
3510 spin_lock(&lp->lock); in collect_piggybacks()
3511 list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) { in collect_piggybacks()
3512 if (!spin_trylock(&pvc->lock)) in collect_piggybacks()
3515 if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) { in collect_piggybacks()
3516 list_del_init(&pvc->preempt_list); in collect_piggybacks()
3517 if (pvc->runner == NULL) { in collect_piggybacks()
3518 pvc->vcore_state = VCORE_INACTIVE; in collect_piggybacks()
3521 spin_unlock(&pvc->lock); in collect_piggybacks()
3525 spin_unlock(&pvc->lock); in collect_piggybacks()
3529 pvc->vcore_state = VCORE_PIGGYBACK; in collect_piggybacks()
3530 if (cip->total_threads >= target_threads) in collect_piggybacks()
3533 spin_unlock(&lp->lock); in collect_piggybacks()
3542 for (sub = 0; sub < cip->n_subcores; ++sub) { in recheck_signals_and_mmu()
3543 vc = cip->vc[sub]; in recheck_signals_and_mmu()
3544 if (!vc->kvm->arch.mmu_ready) in recheck_signals_and_mmu()
3547 if (signal_pending(vcpu->arch.run_task)) in recheck_signals_and_mmu()
3560 spin_lock(&vc->lock); in post_guest_process()
3570 spin_unlock(&vc->lock); in post_guest_process()
3579 if (vcpu->arch.trap) in post_guest_process()
3581 vcpu->arch.run_task); in post_guest_process()
3583 vcpu->arch.ret = ret; in post_guest_process()
3584 vcpu->arch.trap = 0; in post_guest_process()
3586 spin_lock(&vc->lock); in post_guest_process()
3587 if (is_kvmppc_resume_guest(vcpu->arch.ret)) { in post_guest_process()
3588 if (vcpu->arch.pending_exceptions) in post_guest_process()
3590 if (vcpu->arch.ceded) in post_guest_process()
3596 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
3602 } else if (vc->runner) { in post_guest_process()
3603 vc->vcore_state = VCORE_PREEMPT; in post_guest_process()
3606 vc->vcore_state = VCORE_INACTIVE; in post_guest_process()
3608 if (vc->n_runnable > 0 && vc->runner == NULL) { in post_guest_process()
3610 i = -1; in post_guest_process()
3612 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
3615 spin_unlock(&vc->lock); in post_guest_process()
3635 kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0; in kvmppc_clear_host_core()
3656 kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1; in kvmppc_set_host_core()
3664 local_paca->irq_happened |= PACA_IRQ_EE; in set_irq_happened()
3667 local_paca->irq_happened |= PACA_IRQ_DBELL; in set_irq_happened()
3670 local_paca->irq_happened |= PACA_IRQ_HMI; in set_irq_happened()
3680 * Called with vc->lock held.
3709 /* if the runner is no longer runnable, let the caller pick a new one */ in kvmppc_run_core()
3710 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_core()
3717 vc->preempt_tb = TB_NIL; in kvmppc_run_core()
3724 controlled_threads = threads_per_vcore(vc->kvm); in kvmppc_run_core()
3732 ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) { in kvmppc_run_core()
3734 vcpu->arch.ret = -EBUSY; in kvmppc_run_core()
3736 wake_up(&vcpu->arch.cpu_run); in kvmppc_run_core()
3750 if (vc->num_threads < target_threads) in kvmppc_run_core()
3754 * Hard-disable interrupts, and check resched flag and signals. in kvmppc_run_core()
3765 vc->vcore_state = VCORE_INACTIVE; in kvmppc_run_core()
3771 spin_unlock(&pvc->lock); in kvmppc_run_core()
3780 /* Decide on micro-threading (split-core) mode */ in kvmppc_run_core()
3818 paca->kvm_hstate.napping = 0; in kvmppc_run_core()
3819 paca->kvm_hstate.kvm_split_mode = sip; in kvmppc_run_core()
3822 /* Initiate micro-threading (split-core) on POWER8 if required */ in kvmppc_run_core()
3845 int n_online = atomic_read(&vc->online_count); in kvmppc_run_core()
3848 * Use the 8-thread value if we're doing split-core in kvmppc_run_core()
3864 pvc->pcpu = pcpu + thr; in kvmppc_run_core()
3868 * It updates vcpu->cpu and vcpu->arch.thread_cpu in kvmppc_run_core()
3877 if (!vcpu->arch.ptid) in kvmppc_run_core()
3879 active |= 1 << (thr + vcpu->arch.ptid); in kvmppc_run_core()
3896 * When doing micro-threading, poke the inactive threads as well. in kvmppc_run_core()
3907 vc->vcore_state = VCORE_RUNNING; in kvmppc_run_core()
3913 spin_unlock(&core_info.vc[sub]->lock); in kvmppc_run_core()
3917 srcu_idx = srcu_read_lock(&vc->kvm->srcu); in kvmppc_run_core()
3927 srcu_read_unlock(&vc->kvm->srcu, srcu_idx); in kvmppc_run_core()
3931 spin_lock(&vc->lock); in kvmppc_run_core()
3933 vc->vcore_state = VCORE_EXITING; in kvmppc_run_core()
3938 /* Return to whole-core mode if we split the core earlier */ in kvmppc_run_core()
3979 if (sip && sip->napped[i]) in kvmppc_run_core()
3983 spin_unlock(&vc->lock); in kvmppc_run_core()
3995 spin_lock(&vc->lock); in kvmppc_run_core()
3998 vc->vcore_state = VCORE_INACTIVE; in kvmppc_run_core()
4010 struct lppaca *lp = vcpu->arch.vpa.pinned_addr; in vcpu_vpa_increment_dispatch()
4012 u32 yield_count = be32_to_cpu(lp->yield_count) + 1; in vcpu_vpa_increment_dispatch()
4013 lp->yield_count = cpu_to_be32(yield_count); in vcpu_vpa_increment_dispatch()
4014 vcpu->arch.vpa.dirty = 1; in vcpu_vpa_increment_dispatch()
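The vcpu_vpa_increment_dispatch() lines above bump the guest-visible yield_count in the pinned VPA, which is stored big-endian, and mark the VPA dirty for later writeback. Below is a minimal standalone sketch of that swap/increment/swap-back pattern, assuming a little-endian host; the toy_vpa type and the *_x helpers are illustrative stand-ins, not the kernel's lppaca or be32_to_cpu()/cpu_to_be32().

#include <stdint.h>
#include <stdio.h>

/* Assume a little-endian host: be32 <-> cpu is a plain byte swap here. */
static uint32_t be32_to_cpu_x(uint32_t v) { return __builtin_bswap32(v); }
static uint32_t cpu_to_be32_x(uint32_t v) { return __builtin_bswap32(v); }

/* Toy model of the shared VPA: the guest-visible counter is big-endian. */
struct toy_vpa {
        uint32_t yield_count_be;        /* big-endian in guest memory */
        int dirty;                      /* host-side "needs writeback" flag */
};

static void vpa_increment_dispatch(struct toy_vpa *vpa)
{
        uint32_t yc = be32_to_cpu_x(vpa->yield_count_be) + 1;

        vpa->yield_count_be = cpu_to_be32_x(yc);
        vpa->dirty = 1;
}

int main(void)
{
        struct toy_vpa vpa = { .yield_count_be = cpu_to_be32_x(41) };

        vpa_increment_dispatch(&vpa);
        printf("yield_count = %u, dirty = %d\n",
               be32_to_cpu_x(vpa.yield_count_be), vpa.dirty);
        return 0;
}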
4021 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmhv_vcpu_entry_p9_nested()
4048 if (vcpu->arch.psscr != host_psscr) in kvmhv_vcpu_entry_p9_nested()
4049 mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr); in kvmhv_vcpu_entry_p9_nested()
4054 vcpu->arch.regs.msr = vcpu->arch.shregs.msr; in kvmhv_vcpu_entry_p9_nested()
4056 if (vcpu->arch.nested) { in kvmhv_vcpu_entry_p9_nested()
4057 hvregs.lpid = vcpu->arch.nested->shadow_lpid; in kvmhv_vcpu_entry_p9_nested()
4058 hvregs.vcpu_token = vcpu->arch.nested_vcpu_id; in kvmhv_vcpu_entry_p9_nested()
4060 hvregs.lpid = vcpu->kvm->arch.lpid; in kvmhv_vcpu_entry_p9_nested()
4061 hvregs.vcpu_token = vcpu->vcpu_id; in kvmhv_vcpu_entry_p9_nested()
4079 mtspr(SPRN_DEC, kvmppc_dec_expires_host_tb(vcpu) - *tb); in kvmhv_vcpu_entry_p9_nested()
4081 mtspr(SPRN_DAR, vcpu->arch.shregs.dar); in kvmhv_vcpu_entry_p9_nested()
4082 mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr); in kvmhv_vcpu_entry_p9_nested()
4084 accumulate_time(vcpu, &vcpu->arch.in_guest); in kvmhv_vcpu_entry_p9_nested()
4086 __pa(&vcpu->arch.regs)); in kvmhv_vcpu_entry_p9_nested()
4087 accumulate_time(vcpu, &vcpu->arch.guest_exit); in kvmhv_vcpu_entry_p9_nested()
4090 vcpu->arch.shregs.msr = vcpu->arch.regs.msr; in kvmhv_vcpu_entry_p9_nested()
4091 vcpu->arch.shregs.dar = mfspr(SPRN_DAR); in kvmhv_vcpu_entry_p9_nested()
4092 vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR); in kvmhv_vcpu_entry_p9_nested()
4093 vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR); in kvmhv_vcpu_entry_p9_nested()
4101 vcpu->arch.dec_expires = dec + (*tb + vc->tb_offset); in kvmhv_vcpu_entry_p9_nested()
4106 if (vcpu->arch.psscr != host_psscr) in kvmhv_vcpu_entry_p9_nested()
4118 struct kvm *kvm = vcpu->kvm; in kvmhv_p9_guest_entry()
4119 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmhv_p9_guest_entry()
4131 vcpu->arch.ceded = 0; in kvmhv_p9_guest_entry()
4159 !(vcpu->arch.shregs.msr & MSR_PR)) { in kvmhv_p9_guest_entry()
4175 vcpu->arch.ceded = 0; in kvmhv_p9_guest_entry()
4207 vcpu->arch.slb_max = 0; in kvmhv_p9_guest_entry()
4224 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); in kvmppc_wait_for_exec()
4225 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_wait_for_exec()
4226 spin_unlock(&vc->lock); in kvmppc_wait_for_exec()
4228 spin_lock(&vc->lock); in kvmppc_wait_for_exec()
4230 finish_wait(&vcpu->arch.cpu_run, &wait); in kvmppc_wait_for_exec()
4238 vc->halt_poll_ns *= halt_poll_ns_grow; in grow_halt_poll_ns()
4239 if (vc->halt_poll_ns < halt_poll_ns_grow_start) in grow_halt_poll_ns()
4240 vc->halt_poll_ns = halt_poll_ns_grow_start; in grow_halt_poll_ns()
4246 vc->halt_poll_ns = 0; in shrink_halt_poll_ns()
4248 vc->halt_poll_ns /= halt_poll_ns_shrink; in shrink_halt_poll_ns()
4256 return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr < in xive_interrupt_pending()
4257 vcpu->arch.xive_saved_state.cppr; in xive_interrupt_pending()
4268 if (vcpu->arch.pending_exceptions || vcpu->arch.prodded || in kvmppc_vcpu_woken()
4277 if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu)) in kvmppc_vcpu_check_block()
4284 * exceptions or are no longer ceded
4301 * or external interrupt to one of the vcpus. vc->lock is held.
4313 if (vc->halt_poll_ns) { in kvmppc_vcore_blocked()
4314 ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns); in kvmppc_vcore_blocked()
4315 ++vc->runner->stat.generic.halt_attempted_poll; in kvmppc_vcore_blocked()
4317 vc->vcore_state = VCORE_POLLING; in kvmppc_vcore_blocked()
4318 spin_unlock(&vc->lock); in kvmppc_vcore_blocked()
4328 spin_lock(&vc->lock); in kvmppc_vcore_blocked()
4329 vc->vcore_state = VCORE_INACTIVE; in kvmppc_vcore_blocked()
4332 ++vc->runner->stat.generic.halt_successful_poll; in kvmppc_vcore_blocked()
4337 prepare_to_rcuwait(&vc->wait); in kvmppc_vcore_blocked()
4340 finish_rcuwait(&vc->wait); in kvmppc_vcore_blocked()
4343 if (vc->halt_poll_ns) in kvmppc_vcore_blocked()
4344 ++vc->runner->stat.generic.halt_successful_poll; in kvmppc_vcore_blocked()
4350 vc->vcore_state = VCORE_SLEEPING; in kvmppc_vcore_blocked()
4351 trace_kvmppc_vcore_blocked(vc->runner, 0); in kvmppc_vcore_blocked()
4352 spin_unlock(&vc->lock); in kvmppc_vcore_blocked()
4354 finish_rcuwait(&vc->wait); in kvmppc_vcore_blocked()
4355 spin_lock(&vc->lock); in kvmppc_vcore_blocked()
4356 vc->vcore_state = VCORE_INACTIVE; in kvmppc_vcore_blocked()
4357 trace_kvmppc_vcore_blocked(vc->runner, 1); in kvmppc_vcore_blocked()
4358 ++vc->runner->stat.halt_successful_wait; in kvmppc_vcore_blocked()
4363 block_ns = ktime_to_ns(cur) - ktime_to_ns(start_poll); in kvmppc_vcore_blocked()
4367 vc->runner->stat.generic.halt_wait_ns += in kvmppc_vcore_blocked()
4368 ktime_to_ns(cur) - ktime_to_ns(start_wait); in kvmppc_vcore_blocked()
4370 vc->runner->stat.generic.halt_wait_hist, in kvmppc_vcore_blocked()
4371 ktime_to_ns(cur) - ktime_to_ns(start_wait)); in kvmppc_vcore_blocked()
4373 if (vc->halt_poll_ns) { in kvmppc_vcore_blocked()
4374 vc->runner->stat.generic.halt_poll_fail_ns += in kvmppc_vcore_blocked()
4375 ktime_to_ns(start_wait) - in kvmppc_vcore_blocked()
4378 vc->runner->stat.generic.halt_poll_fail_hist, in kvmppc_vcore_blocked()
4379 ktime_to_ns(start_wait) - in kvmppc_vcore_blocked()
4384 if (vc->halt_poll_ns) { in kvmppc_vcore_blocked()
4385 vc->runner->stat.generic.halt_poll_success_ns += in kvmppc_vcore_blocked()
4386 ktime_to_ns(cur) - in kvmppc_vcore_blocked()
4389 vc->runner->stat.generic.halt_poll_success_hist, in kvmppc_vcore_blocked()
4390 ktime_to_ns(cur) - ktime_to_ns(start_poll)); in kvmppc_vcore_blocked()
4396 if (block_ns <= vc->halt_poll_ns) in kvmppc_vcore_blocked()
4399 else if (vc->halt_poll_ns && block_ns > halt_poll_ns) in kvmppc_vcore_blocked()
4402 else if (vc->halt_poll_ns < halt_poll_ns && in kvmppc_vcore_blocked()
4405 if (vc->halt_poll_ns > halt_poll_ns) in kvmppc_vcore_blocked()
4406 vc->halt_poll_ns = halt_poll_ns; in kvmppc_vcore_blocked()
4408 vc->halt_poll_ns = 0; in kvmppc_vcore_blocked()
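Taken together, grow_halt_poll_ns(), shrink_halt_poll_ns() and the block-time comparisons at the end of kvmppc_vcore_blocked() form an adaptive halt-polling policy: poll before sleeping, grow the per-vcore window after short blocks, shrink or zero it after blocks longer than the global halt_poll_ns limit, and clamp to that limit. Below is a simplified userspace model built only from the lines visible above; the toy_* names are illustrative, the module-parameter names are reused as plain variables, and the kernel's locking, tracing and statistics are omitted.

#include <stdint.h>
#include <stdio.h>

/* Global knobs (module parameters in the kernel; plain variables here). */
static unsigned int halt_poll_ns = 500000;        /* global max poll window */
static unsigned int halt_poll_ns_grow = 2;        /* multiplier on grow */
static unsigned int halt_poll_ns_grow_start = 10000;
static unsigned int halt_poll_ns_shrink = 0;      /* 0 => reset to zero */

struct toy_vcore {
        unsigned int halt_poll_ns;      /* current per-vcore poll window */
};

static void grow_halt_poll_ns(struct toy_vcore *vc)
{
        vc->halt_poll_ns *= halt_poll_ns_grow;
        if (vc->halt_poll_ns < halt_poll_ns_grow_start)
                vc->halt_poll_ns = halt_poll_ns_grow_start;
}

static void shrink_halt_poll_ns(struct toy_vcore *vc)
{
        if (halt_poll_ns_shrink == 0)
                vc->halt_poll_ns = 0;
        else
                vc->halt_poll_ns /= halt_poll_ns_shrink;
}

/* Adjust the poll window after one block of block_ns nanoseconds. */
static void adjust_halt_poll(struct toy_vcore *vc, uint64_t block_ns)
{
        if (!halt_poll_ns) {            /* polling disabled globally */
                vc->halt_poll_ns = 0;
                return;
        }
        if (block_ns <= vc->halt_poll_ns)
                ;                       /* window already covered the block */
        else if (vc->halt_poll_ns && block_ns > halt_poll_ns)
                shrink_halt_poll_ns(vc);/* blocked longer than the global max */
        else if (vc->halt_poll_ns < halt_poll_ns && block_ns < halt_poll_ns)
                grow_halt_poll_ns(vc);  /* window too small: grow it */

        if (vc->halt_poll_ns > halt_poll_ns)
                vc->halt_poll_ns = halt_poll_ns;        /* clamp to max */
}

int main(void)
{
        struct toy_vcore vc = { 0 };

        adjust_halt_poll(&vc, 20000);   /* short block: start polling */
        printf("after short block: %u ns\n", vc.halt_poll_ns);
        adjust_halt_poll(&vc, 5000000); /* long block: back off */
        printf("after long block:  %u ns\n", vc.halt_poll_ns);
        return 0;
}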
4420 struct kvm *kvm = vcpu->kvm; in kvmhv_setup_mmu()
4422 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_setup_mmu()
4423 if (!kvm->arch.mmu_ready) { in kvmhv_setup_mmu()
4429 kvm->arch.mmu_ready = 1; in kvmhv_setup_mmu()
4432 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_setup_mmu()
4438 struct kvm_run *run = vcpu->run; in kvmppc_run_vcpu()
4445 run->exit_reason = 0; in kvmppc_run_vcpu()
4446 vcpu->arch.ret = RESUME_GUEST; in kvmppc_run_vcpu()
4447 vcpu->arch.trap = 0; in kvmppc_run_vcpu()
4453 vc = vcpu->arch.vcore; in kvmppc_run_vcpu()
4454 spin_lock(&vc->lock); in kvmppc_run_vcpu()
4455 vcpu->arch.ceded = 0; in kvmppc_run_vcpu()
4456 vcpu->arch.run_task = current; in kvmppc_run_vcpu()
4457 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); in kvmppc_run_vcpu()
4458 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmppc_run_vcpu()
4459 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_run_vcpu()
4460 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu); in kvmppc_run_vcpu()
4461 ++vc->n_runnable; in kvmppc_run_vcpu()
4469 if ((vc->vcore_state == VCORE_PIGGYBACK || in kvmppc_run_vcpu()
4470 vc->vcore_state == VCORE_RUNNING) && in kvmppc_run_vcpu()
4475 } else if (vc->vcore_state == VCORE_SLEEPING) { in kvmppc_run_vcpu()
4476 rcuwait_wake_up(&vc->wait); in kvmppc_run_vcpu()
4481 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
4484 if (!vcpu->kvm->arch.mmu_ready) { in kvmppc_run_vcpu()
4485 spin_unlock(&vc->lock); in kvmppc_run_vcpu()
4487 spin_lock(&vc->lock); in kvmppc_run_vcpu()
4489 run->exit_reason = KVM_EXIT_FAIL_ENTRY; in kvmppc_run_vcpu()
4490 run->fail_entry. in kvmppc_run_vcpu()
4492 vcpu->arch.ret = r; in kvmppc_run_vcpu()
4497 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) in kvmppc_run_vcpu()
4500 if (vc->vcore_state != VCORE_INACTIVE) { in kvmppc_run_vcpu()
4506 if (signal_pending(v->arch.run_task)) { in kvmppc_run_vcpu()
4508 v->stat.signal_exits++; in kvmppc_run_vcpu()
4509 v->run->exit_reason = KVM_EXIT_INTR; in kvmppc_run_vcpu()
4510 v->arch.ret = -EINTR; in kvmppc_run_vcpu()
4511 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
4514 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_vcpu()
4519 n_ceded += v->arch.ceded; in kvmppc_run_vcpu()
4521 v->arch.ceded = 0; in kvmppc_run_vcpu()
4523 vc->runner = vcpu; in kvmppc_run_vcpu()
4524 if (n_ceded == vc->n_runnable) { in kvmppc_run_vcpu()
4529 cond_resched_lock(&vc->lock); in kvmppc_run_vcpu()
4530 if (vc->vcore_state == VCORE_PREEMPT) in kvmppc_run_vcpu()
4535 vc->runner = NULL; in kvmppc_run_vcpu()
4538 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
4539 (vc->vcore_state == VCORE_RUNNING || in kvmppc_run_vcpu()
4540 vc->vcore_state == VCORE_EXITING || in kvmppc_run_vcpu()
4541 vc->vcore_state == VCORE_PIGGYBACK)) in kvmppc_run_vcpu()
4544 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) in kvmppc_run_vcpu()
4547 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_run_vcpu()
4549 vcpu->stat.signal_exits++; in kvmppc_run_vcpu()
4550 run->exit_reason = KVM_EXIT_INTR; in kvmppc_run_vcpu()
4551 vcpu->arch.ret = -EINTR; in kvmppc_run_vcpu()
4554 if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) { in kvmppc_run_vcpu()
4556 i = -1; in kvmppc_run_vcpu()
4558 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
4562 spin_unlock(&vc->lock); in kvmppc_run_vcpu()
4563 return vcpu->arch.ret; in kvmppc_run_vcpu()
4570 struct kvm_run *run = vcpu->run; in kvmhv_run_single_vcpu()
4574 struct kvm *kvm = vcpu->kvm; in kvmhv_run_single_vcpu()
4575 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmhv_run_single_vcpu()
4581 run->exit_reason = 0; in kvmhv_run_single_vcpu()
4582 vcpu->arch.ret = RESUME_GUEST; in kvmhv_run_single_vcpu()
4583 vcpu->arch.trap = 0; in kvmhv_run_single_vcpu()
4585 vc = vcpu->arch.vcore; in kvmhv_run_single_vcpu()
4586 vcpu->arch.ceded = 0; in kvmhv_run_single_vcpu()
4587 vcpu->arch.run_task = current; in kvmhv_run_single_vcpu()
4588 vcpu->arch.last_inst = KVM_INST_FETCH_FAILED; in kvmhv_run_single_vcpu()
4591 if (unlikely(!kvm->arch.mmu_ready)) { in kvmhv_run_single_vcpu()
4594 run->exit_reason = KVM_EXIT_FAIL_ENTRY; in kvmhv_run_single_vcpu()
4595 run->fail_entry.hardware_entry_failure_reason = 0; in kvmhv_run_single_vcpu()
4596 vcpu->arch.ret = r; in kvmhv_run_single_vcpu()
4611 /* flags save not required, but irq_pmu has no disable/enable API */ in kvmhv_run_single_vcpu()
4614 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmhv_run_single_vcpu()
4618 if (need_resched() || !kvm->arch.mmu_ready) in kvmhv_run_single_vcpu()
4621 vcpu->cpu = pcpu; in kvmhv_run_single_vcpu()
4622 vcpu->arch.thread_cpu = pcpu; in kvmhv_run_single_vcpu()
4623 vc->pcpu = pcpu; in kvmhv_run_single_vcpu()
4624 local_paca->kvm_hstate.kvm_vcpu = vcpu; in kvmhv_run_single_vcpu()
4625 local_paca->kvm_hstate.ptid = 0; in kvmhv_run_single_vcpu()
4626 local_paca->kvm_hstate.fake_suspend = 0; in kvmhv_run_single_vcpu()
4638 if (vcpu->arch.shregs.msr & MSR_EE) { in kvmhv_run_single_vcpu()
4643 &vcpu->arch.pending_exceptions)) { in kvmhv_run_single_vcpu()
4646 } else if (vcpu->arch.pending_exceptions || in kvmhv_run_single_vcpu()
4647 vcpu->arch.doorbell_request || in kvmhv_run_single_vcpu()
4649 vcpu->arch.ret = RESUME_HOST; in kvmhv_run_single_vcpu()
4653 if (vcpu->arch.timer_running) { in kvmhv_run_single_vcpu()
4654 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmhv_run_single_vcpu()
4655 vcpu->arch.timer_running = 0; in kvmhv_run_single_vcpu()
4660 kvmppc_update_vpa_dispatch_p9(vcpu, vc, tb + vc->tb_offset); in kvmhv_run_single_vcpu()
4666 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmhv_run_single_vcpu()
4672 vcpu->arch.trap = trap; in kvmhv_run_single_vcpu()
4677 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmhv_run_single_vcpu()
4681 vcpu->cpu = -1; in kvmhv_run_single_vcpu()
4682 vcpu->arch.thread_cpu = -1; in kvmhv_run_single_vcpu()
4683 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmhv_run_single_vcpu()
4722 vcpu->arch.ret = r; in kvmhv_run_single_vcpu()
4731 vcpu->stat.signal_exits++; in kvmhv_run_single_vcpu()
4732 run->exit_reason = KVM_EXIT_INTR; in kvmhv_run_single_vcpu()
4733 vcpu->arch.ret = -EINTR; in kvmhv_run_single_vcpu()
4746 vcpu->arch.ceded = 0; in kvmhv_run_single_vcpu()
4751 return vcpu->arch.ret; in kvmhv_run_single_vcpu()
4754 vcpu->stat.signal_exits++; in kvmhv_run_single_vcpu()
4755 run->exit_reason = KVM_EXIT_INTR; in kvmhv_run_single_vcpu()
4756 vcpu->arch.ret = -EINTR; in kvmhv_run_single_vcpu()
4758 vcpu->cpu = -1; in kvmhv_run_single_vcpu()
4759 vcpu->arch.thread_cpu = -1; in kvmhv_run_single_vcpu()
4760 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmhv_run_single_vcpu()
4768 struct kvm_run *run = vcpu->run; in kvmppc_vcpu_run_hv()
4774 start_timing(vcpu, &vcpu->arch.vcpu_entry); in kvmppc_vcpu_run_hv()
4776 if (!vcpu->arch.sane) { in kvmppc_vcpu_run_hv()
4777 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvmppc_vcpu_run_hv()
4778 return -EINVAL; in kvmppc_vcpu_run_hv()
4781 /* No need to go into the guest when all we'll do is come back out */ in kvmppc_vcpu_run_hv()
4783 run->exit_reason = KVM_EXIT_INTR; in kvmppc_vcpu_run_hv()
4784 return -EINTR; in kvmppc_vcpu_run_hv()
4792 if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs && in kvmppc_vcpu_run_hv()
4793 (current->thread.regs->msr & MSR_TM)) { in kvmppc_vcpu_run_hv()
4794 if (MSR_TM_ACTIVE(current->thread.regs->msr)) { in kvmppc_vcpu_run_hv()
4795 run->exit_reason = KVM_EXIT_FAIL_ENTRY; in kvmppc_vcpu_run_hv()
4796 run->fail_entry.hardware_entry_failure_reason = 0; in kvmppc_vcpu_run_hv()
4797 return -EINVAL; in kvmppc_vcpu_run_hv()
4806 if (!vcpu->arch.online) { in kvmppc_vcpu_run_hv()
4807 atomic_inc(&vcpu->arch.vcore->online_count); in kvmppc_vcpu_run_hv()
4808 vcpu->arch.online = 1; in kvmppc_vcpu_run_hv()
4813 kvm = vcpu->kvm; in kvmppc_vcpu_run_hv()
4814 atomic_inc(&kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
4827 (vcpu->arch.hfscr & HFSCR_TM)) in kvmppc_vcpu_run_hv()
4836 vcpu->arch.waitp = &vcpu->arch.vcore->wait; in kvmppc_vcpu_run_hv()
4837 vcpu->arch.pgdir = kvm->mm->pgd; in kvmppc_vcpu_run_hv()
4838 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_vcpu_run_hv()
4841 accumulate_time(vcpu, &vcpu->arch.guest_entry); in kvmppc_vcpu_run_hv()
4844 vcpu->arch.vcore->lpcr); in kvmppc_vcpu_run_hv()
4848 if (run->exit_reason == KVM_EXIT_PAPR_HCALL) { in kvmppc_vcpu_run_hv()
4849 accumulate_time(vcpu, &vcpu->arch.hcall); in kvmppc_vcpu_run_hv()
4851 if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) { in kvmppc_vcpu_run_hv()
4866 accumulate_time(vcpu, &vcpu->arch.pg_fault); in kvmppc_vcpu_run_hv()
4867 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_vcpu_run_hv()
4869 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_vcpu_run_hv()
4870 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_vcpu_run_hv()
4878 accumulate_time(vcpu, &vcpu->arch.vcpu_exit); in kvmppc_vcpu_run_hv()
4880 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_vcpu_run_hv()
4881 atomic_dec(&kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
4893 (*sps)->page_shift = shift; in kvmppc_add_seg_page_size()
4894 (*sps)->slb_enc = sllp; in kvmppc_add_seg_page_size()
4895 (*sps)->enc[0].page_shift = shift; in kvmppc_add_seg_page_size()
4896 (*sps)->enc[0].pte_enc = kvmppc_pgsize_lp_encoding(shift, shift); in kvmppc_add_seg_page_size()
4902 if (penc != -1) { in kvmppc_add_seg_page_size()
4903 (*sps)->enc[1].page_shift = 24; in kvmppc_add_seg_page_size()
4904 (*sps)->enc[1].pte_enc = penc; in kvmppc_add_seg_page_size()
4920 info->data_keys = 32; in kvm_vm_ioctl_get_smmu_info_hv()
4921 info->instr_keys = cpu_has_feature(CPU_FTR_ARCH_207S) ? 32 : 0; in kvm_vm_ioctl_get_smmu_info_hv()
4923 /* POWER7, 8 and 9 all have 1T segments and 32-entry SLB */ in kvm_vm_ioctl_get_smmu_info_hv()
4924 info->flags = KVM_PPC_PAGE_SIZES_REAL | KVM_PPC_1T_SEGMENTS; in kvm_vm_ioctl_get_smmu_info_hv()
4925 info->slb_size = 32; in kvm_vm_ioctl_get_smmu_info_hv()
4927 /* We only support these sizes for now, and no multi-size segments */ in kvm_vm_ioctl_get_smmu_info_hv()
4928 sps = &info->sps[0]; in kvm_vm_ioctl_get_smmu_info_hv()
4935 info->flags |= KVM_PPC_NO_HASH; in kvm_vm_ioctl_get_smmu_info_hv()
4953 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log_hv()
4955 r = -EINVAL; in kvm_vm_ioctl_get_dirty_log_hv()
4956 if (log->slot >= KVM_USER_MEM_SLOTS) in kvm_vm_ioctl_get_dirty_log_hv()
4960 memslot = id_to_memslot(slots, log->slot); in kvm_vm_ioctl_get_dirty_log_hv()
4961 r = -ENOENT; in kvm_vm_ioctl_get_dirty_log_hv()
4962 if (!memslot || !memslot->dirty_bitmap) in kvm_vm_ioctl_get_dirty_log_hv()
4970 buf = memslot->dirty_bitmap + n / sizeof(long); in kvm_vm_ioctl_get_dirty_log_hv()
4986 p = memslot->dirty_bitmap; in kvm_vm_ioctl_get_dirty_log_hv()
4993 spin_lock(&vcpu->arch.vpa_update_lock); in kvm_vm_ioctl_get_dirty_log_hv()
4994 kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf); in kvm_vm_ioctl_get_dirty_log_hv()
4995 kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf); in kvm_vm_ioctl_get_dirty_log_hv()
4996 spin_unlock(&vcpu->arch.vpa_update_lock); in kvm_vm_ioctl_get_dirty_log_hv()
4999 r = -EFAULT; in kvm_vm_ioctl_get_dirty_log_hv()
5000 if (copy_to_user(log->dirty_bitmap, buf, n)) in kvm_vm_ioctl_get_dirty_log_hv()
5005 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log_hv()
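kvm_vm_ioctl_get_dirty_log_hv() uses the second half of the doubled dirty bitmap ('buf' above, at dirty_bitmap + n / sizeof(long)) as scratch space while harvesting dirty bits, including those of the VPA and DTL pages, before handing the result to userspace with copy_to_user(). A toy model of that double-buffer snapshot follows; sizes and names are illustrative and the radix/HPT and VPA harvesting is reduced to a copy-and-clear.

#include <string.h>
#include <stdio.h>

#define TOY_NPAGES 128
#define BITMAP_LONGS ((TOY_NPAGES + 8 * sizeof(unsigned long) - 1) / (8 * sizeof(unsigned long)))

/* Toy memslot: the bitmap is allocated at twice the needed size, the
 * second half being scratch space for building the userspace snapshot. */
struct toy_memslot {
        unsigned long dirty_bitmap[2 * BITMAP_LONGS];
};

static void mark_page_dirty(struct toy_memslot *slot, unsigned int pfn)
{
        slot->dirty_bitmap[pfn / (8 * sizeof(unsigned long))] |=
                1UL << (pfn % (8 * sizeof(unsigned long)));
}

/* Snapshot the dirty bits into 'out' and clear the live half. */
static void get_dirty_log(struct toy_memslot *slot, unsigned long *out)
{
        unsigned long *buf = slot->dirty_bitmap + BITMAP_LONGS;  /* scratch */

        memcpy(buf, slot->dirty_bitmap, BITMAP_LONGS * sizeof(unsigned long));
        memset(slot->dirty_bitmap, 0, BITMAP_LONGS * sizeof(unsigned long));
        /* in the kernel, copy_to_user(log->dirty_bitmap, buf, n) runs here */
        memcpy(out, buf, BITMAP_LONGS * sizeof(unsigned long));
}

int main(void)
{
        struct toy_memslot slot = { 0 };
        unsigned long snap[BITMAP_LONGS];

        mark_page_dirty(&slot, 5);
        mark_page_dirty(&slot, 70);
        get_dirty_log(&slot, snap);
        printf("first word of snapshot: %#lx\n", snap[0]);
        return 0;
}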
5011 vfree(slot->arch.rmap); in kvmppc_core_free_memslot_hv()
5012 slot->arch.rmap = NULL; in kvmppc_core_free_memslot_hv()
5021 unsigned long size = array_size(new->npages, sizeof(*new->arch.rmap)); in kvmppc_core_prepare_memory_region_hv()
5024 return -ENOMEM; in kvmppc_core_prepare_memory_region_hv()
5026 new->arch.rmap = vzalloc(size); in kvmppc_core_prepare_memory_region_hv()
5027 if (!new->arch.rmap) in kvmppc_core_prepare_memory_region_hv()
5028 return -ENOMEM; in kvmppc_core_prepare_memory_region_hv()
5030 new->arch.rmap = old->arch.rmap; in kvmppc_core_prepare_memory_region_hv()
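kvmppc_core_prepare_memory_region_hv() sizes one rmap entry per guest page with an overflow-checked array_size(), zero-allocates the array for a newly created slot, and reuses the old array when an existing slot is only moved or has its flags changed. A small sketch of that shape, with calloc() standing in for array_size() + vzalloc(); the toy types and change enum are illustrative.

#include <stdlib.h>

/* One reverse-mapping word per guest page (contents irrelevant here). */
struct toy_memslot {
        unsigned long npages;
        unsigned long *rmap;
};

enum change { TOY_CREATE, TOY_MOVE, TOY_FLAGS_ONLY, TOY_DELETE };

/* New slots get a zeroed per-page rmap array; changed slots inherit the
 * existing one.  calloc() does the same multiply-with-overflow check as
 * array_size(). */
static int prepare_memslot(struct toy_memslot *old, struct toy_memslot *new,
                           enum change how)
{
        if (how == TOY_CREATE) {
                new->rmap = calloc(new->npages, sizeof(*new->rmap));
                if (!new->rmap)
                        return -1;      /* -ENOMEM in the kernel */
        } else if (how != TOY_DELETE) {
                new->rmap = old->rmap;  /* reuse on move / flags change */
        }
        return 0;
}

int main(void)
{
        struct toy_memslot old = { 0 }, new = { .npages = 1024 };

        return prepare_memslot(&old, &new, TOY_CREATE);
}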
5044 * MMIO be no longer emulated MMIO, so invalidate in kvmppc_core_commit_memory_region_hv()
5048 atomic64_inc(&kvm->arch.mmio_update); in kvmppc_core_commit_memory_region_hv()
5053 * flush shadow mappings. For KVM_MR_CREATE we have no in kvmppc_core_commit_memory_region_hv()
5058 * to get rid of any THP PTEs in the partition-scoped page tables in kvmppc_core_commit_memory_region_hv()
5064 ((new->flags ^ old->flags) & KVM_MEM_LOG_DIRTY_PAGES)) in kvmppc_core_commit_memory_region_hv()
5069 if (!kvm->arch.secure_guest) in kvmppc_core_commit_memory_region_hv()
5090 * Update LPCR values in kvm->arch and in vcores.
5091 * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion
5092 * of kvm->arch.lpcr update).
5099 if ((kvm->arch.lpcr & mask) == lpcr) in kvmppc_update_lpcr()
5102 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
5105 struct kvmppc_vcore *vc = kvm->arch.vcores[i]; in kvmppc_update_lpcr()
5109 spin_lock(&vc->lock); in kvmppc_update_lpcr()
5110 vc->lpcr = (vc->lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
5111 verify_lpcr(kvm, vc->lpcr); in kvmppc_update_lpcr()
5112 spin_unlock(&vc->lock); in kvmppc_update_lpcr()
5113 if (++cores_done >= kvm->arch.online_vcores) in kvmppc_update_lpcr()
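kvmppc_update_lpcr() changes only the LPCR bits selected by 'mask', first in the VM-wide kvm->arch.lpcr and then, under each vcore's lock, in every vcore's cached copy. A toy version of that masked read-modify-write follows, with the locking, verify_lpcr() and the online_vcores early exit left out; the toy_* names are illustrative.

#include <stdio.h>

#define TOY_NR_VCORES 4

struct toy_vcore { unsigned long lpcr; };
struct toy_kvm {
        unsigned long lpcr;
        struct toy_vcore *vcores[TOY_NR_VCORES];
};

/* 'lpcr' holds the new values for the bits covered by 'mask'; everything
 * outside the mask is preserved. */
static void update_lpcr(struct toy_kvm *kvm, unsigned long lpcr, unsigned long mask)
{
        int i;

        if ((kvm->lpcr & mask) == lpcr)
                return;                         /* nothing to change */

        kvm->lpcr = (kvm->lpcr & ~mask) | lpcr;

        for (i = 0; i < TOY_NR_VCORES; i++) {
                struct toy_vcore *vc = kvm->vcores[i];

                if (!vc)
                        continue;
                /* spin_lock(&vc->lock) in the real code */
                vc->lpcr = (vc->lpcr & ~mask) | lpcr;
                /* spin_unlock(&vc->lock) */
        }
}

int main(void)
{
        struct toy_vcore vc0 = { 0xff00 };
        struct toy_kvm kvm = { .lpcr = 0xff00, .vcores = { &vc0 } };

        update_lpcr(&kvm, 0x0042, 0x00ff);      /* replace only the low byte */
        printf("kvm.lpcr=%#lx vc0.lpcr=%#lx\n", kvm.lpcr, vc0.lpcr);
        return 0;
}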
5123 /* PS field - page size for VRMA */ in kvmppc_setup_partition_table()
5124 dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) | in kvmppc_setup_partition_table()
5125 ((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1); in kvmppc_setup_partition_table()
5127 dw0 |= kvm->arch.sdr1; in kvmppc_setup_partition_table()
5130 dw1 = kvm->arch.process_table; in kvmppc_setup_partition_table()
5133 __pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE; in kvmppc_setup_partition_table()
5134 dw1 = PATB_GR | kvm->arch.process_table; in kvmppc_setup_partition_table()
5136 kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1); in kvmppc_setup_partition_table()
5140 * Set up HPT (hashed page table) and RMA (real-mode area).
5141 * Must be called with kvm->arch.mmu_setup_lock held.
5146 struct kvm *kvm = vcpu->kvm; in kvmppc_hv_setup_htab_rma()
5155 if (!kvm->arch.hpt.virt) { in kvmppc_hv_setup_htab_rma()
5163 while ((err == -ENOMEM) && --order >= PPC_MIN_HPT_ORDER) in kvmppc_hv_setup_htab_rma()
5175 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_hv_setup_htab_rma()
5179 err = -EINVAL; in kvmppc_hv_setup_htab_rma()
5180 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) in kvmppc_hv_setup_htab_rma()
5184 hva = memslot->userspace_addr; in kvmppc_hv_setup_htab_rma()
5185 mmap_read_lock(kvm->mm); in kvmppc_hv_setup_htab_rma()
5186 vma = vma_lookup(kvm->mm, hva); in kvmppc_hv_setup_htab_rma()
5187 if (!vma || (vma->vm_flags & VM_IO)) in kvmppc_hv_setup_htab_rma()
5192 mmap_read_unlock(kvm->mm); in kvmppc_hv_setup_htab_rma()
5204 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | in kvmppc_hv_setup_htab_rma()
5211 /* the -4 is to account for senc values starting at 0x10 */ in kvmppc_hv_setup_htab_rma()
5212 lpcr = senc << (LPCR_VRMASD_SH - 4); in kvmppc_hv_setup_htab_rma()
5216 /* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */ in kvmppc_hv_setup_htab_rma()
5220 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_hv_setup_htab_rma()
5225 mmap_read_unlock(kvm->mm); in kvmppc_hv_setup_htab_rma()
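When no HPT exists yet, kvmppc_hv_setup_htab_rma() tries to allocate one at the default order and, on -ENOMEM, retries at progressively smaller orders down to PPC_MIN_HPT_ORDER (the "--order" loop above). A sketch of that fallback loop, with malloc() standing in for the kernel's HPT allocator and TOY_MIN_HPT_ORDER as an illustrative floor:

#include <stdlib.h>
#include <stdio.h>

#define TOY_MIN_HPT_ORDER 18   /* stand-in for PPC_MIN_HPT_ORDER */

/* Try to allocate a 2^order byte table, falling back to smaller orders on
 * failure.  Returns the order actually used, or -1 if nothing fits. */
static int alloc_hpt(void **hpt, int order)
{
        void *p = NULL;

        while (order >= TOY_MIN_HPT_ORDER) {
                p = malloc(1UL << order);
                if (p)
                        break;          /* got one at this order */
                order--;                /* halve the size and retry */
        }
        if (!p)
                return -1;              /* -ENOMEM in the kernel */
        *hpt = p;
        return order;
}

int main(void)
{
        void *hpt = NULL;
        int order = alloc_hpt(&hpt, 24);        /* ask for 16 MiB first */

        printf("allocated order %d\n", order);
        free(hpt);                              /* free(NULL) is a no-op */
        return 0;
}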
5230 * Must be called with kvm->arch.mmu_setup_lock held and
5231 * mmu_ready = 0 and no vcpus running.
5240 kvm->arch.process_table = 0; in kvmppc_switch_mmu_to_hpt()
5242 spin_lock(&kvm->mmu_lock); in kvmppc_switch_mmu_to_hpt()
5243 kvm->arch.radix = 0; in kvmppc_switch_mmu_to_hpt()
5244 spin_unlock(&kvm->mmu_lock); in kvmppc_switch_mmu_to_hpt()
5257 * Must be called with kvm->arch.mmu_setup_lock held and
5258 * mmu_ready = 0 and no vcpus running.
5270 spin_lock(&kvm->mmu_lock); in kvmppc_switch_mmu_to_radix()
5271 kvm->arch.radix = 1; in kvmppc_switch_mmu_to_radix()
5272 spin_unlock(&kvm->mmu_lock); in kvmppc_switch_mmu_to_radix()
5273 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_switch_mmu_to_radix()
5280 (kvm->arch.host_lpcr & LPCR_HAIL)) in kvmppc_switch_mmu_to_radix()
5290 * Allocate a per-core structure for managing state about which cores are
5295 * It is only freed when the kvm-hv module is unloaded.
5318 ops->rm_core = kzalloc(size, GFP_KERNEL); in kvmppc_alloc_host_rm_ops()
5320 if (!ops->rm_core) { in kvmppc_alloc_host_rm_ops()
5332 ops->rm_core[core].rm_state.in_host = 1; in kvmppc_alloc_host_rm_ops()
5335 ops->vcpu_kick = kvmppc_fast_vcpu_kick_hv; in kvmppc_alloc_host_rm_ops()
5340 * Do an atomic assignment (no locks used here), but if someone in kvmppc_alloc_host_rm_ops()
5348 kfree(ops->rm_core); in kvmppc_alloc_host_rm_ops()
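kvmppc_alloc_host_rm_ops() builds the per-core rm_core[] array (every core starting with in_host = 1) and then publishes the whole structure with a single lock-free compare-and-swap; a loser of the race frees its copy, as the "Do an atomic assignment" comment above says. Below is a userspace sketch of that allocate-then-CAS-publish pattern using C11 atomics; the names are illustrative and the seq_cst CAS is only an approximation of the kernel's smp_wmb() + cmpxchg64().

#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>

/* Per-core state published once, globally, without a lock. */
struct host_rm_core { int in_host; };
struct host_rm_ops  { struct host_rm_core *rm_core; int ncores; };

static _Atomic(struct host_rm_ops *) host_rm_ops_global;

/* Allocate and initialise a candidate, then try to publish it with one
 * compare-and-swap against NULL.  If another thread won, free our copy
 * and use the winner's pointer. */
static struct host_rm_ops *get_host_rm_ops(int ncores)
{
        struct host_rm_ops *ops, *expected = NULL;

        ops = atomic_load(&host_rm_ops_global);
        if (ops)
                return ops;             /* already published */

        ops = calloc(1, sizeof(*ops));
        ops->rm_core = calloc(ncores, sizeof(*ops->rm_core));
        ops->ncores = ncores;
        for (int i = 0; i < ncores; i++)
                ops->rm_core[i].in_host = 1;    /* all cores start in the host */

        if (!atomic_compare_exchange_strong(&host_rm_ops_global,
                                            &expected, ops)) {
                free(ops->rm_core);     /* lost the race: discard ours */
                free(ops);
                ops = expected;         /* winner's pointer */
        }
        return ops;
}

int main(void)
{
        struct host_rm_ops *ops = get_host_rm_ops(4);

        printf("core 0 in_host = %d\n", ops->rm_core[0].in_host);
        return 0;
}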
5364 kfree(kvmppc_host_rm_ops_hv->rm_core); in kvmppc_free_host_rm_ops()
5376 mutex_init(&kvm->arch.uvmem_lock); in kvmppc_core_init_vm_hv()
5377 INIT_LIST_HEAD(&kvm->arch.uvmem_pfns); in kvmppc_core_init_vm_hv()
5378 mutex_init(&kvm->arch.mmu_setup_lock); in kvmppc_core_init_vm_hv()
5384 return -ENOMEM; in kvmppc_core_init_vm_hv()
5385 kvm->arch.lpid = lpid; in kvmppc_core_init_vm_hv()
5399 cpumask_setall(&kvm->arch.need_tlb_flush); in kvmppc_core_init_vm_hv()
5402 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls, in kvmppc_core_init_vm_hv()
5403 sizeof(kvm->arch.enabled_hcalls)); in kvmppc_core_init_vm_hv()
5406 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); in kvmppc_core_init_vm_hv()
5410 kvm->arch.host_lpid = mfspr(SPRN_LPID); in kvmppc_core_init_vm_hv()
5411 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); in kvmppc_core_init_vm_hv()
5422 kvm->arch.vrma_slb_v = SLB_VSID_B_1T | in kvmppc_core_init_vm_hv()
5431 * be unnecessary but better safe than sorry in case we re-enable in kvmppc_core_init_vm_hv()
5450 kvm->arch.radix = 1; in kvmppc_core_init_vm_hv()
5451 kvm->arch.mmu_ready = 1; in kvmppc_core_init_vm_hv()
5456 (kvm->arch.host_lpcr & LPCR_HAIL)) in kvmppc_core_init_vm_hv()
5460 kvmppc_free_lpid(kvm->arch.lpid); in kvmppc_core_init_vm_hv()
5467 kvm->arch.lpcr = lpcr; in kvmppc_core_init_vm_hv()
5470 kvm->arch.resize_hpt = NULL; in kvmppc_core_init_vm_hv()
5480 kvm->arch.tlb_sets = 1; in kvmppc_core_init_vm_hv()
5482 kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX; /* 128 */ in kvmppc_core_init_vm_hv()
5484 kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */ in kvmppc_core_init_vm_hv()
5486 kvm->arch.tlb_sets = POWER8_TLB_SETS; /* 512 */ in kvmppc_core_init_vm_hv()
5488 kvm->arch.tlb_sets = POWER7_TLB_SETS; /* 128 */ in kvmppc_core_init_vm_hv()
5505 kvm->arch.smt_mode = threads_per_subcore; in kvmppc_core_init_vm_hv()
5507 kvm->arch.smt_mode = 1; in kvmppc_core_init_vm_hv()
5508 kvm->arch.emul_smt_mode = 1; in kvmppc_core_init_vm_hv()
5526 kfree(kvm->arch.vcores[i]); in kvmppc_free_vcores()
5527 kvm->arch.online_vcores = 0; in kvmppc_free_vcores()
5541 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_core_destroy_vm_hv()
5547 kvm->arch.process_table = 0; in kvmppc_core_destroy_vm_hv()
5548 if (kvm->arch.secure_guest) in kvmppc_core_destroy_vm_hv()
5549 uv_svm_terminate(kvm->arch.lpid); in kvmppc_core_destroy_vm_hv()
5550 kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); in kvmppc_core_destroy_vm_hv()
5553 kvmppc_free_lpid(kvm->arch.lpid); in kvmppc_core_destroy_vm_hv()
5587 return -EIO; in kvmppc_core_check_processor_compat_hv()
5594 kfree(kvm->arch.pimap); in kvmppc_free_pimap()
5616 return -EIO; in kvmppc_set_passthru_irq()
5618 mutex_lock(&kvm->lock); in kvmppc_set_passthru_irq()
5620 pimap = kvm->arch.pimap; in kvmppc_set_passthru_irq()
5625 mutex_unlock(&kvm->lock); in kvmppc_set_passthru_irq()
5626 return -ENOMEM; in kvmppc_set_passthru_irq()
5628 kvm->arch.pimap = pimap; in kvmppc_set_passthru_irq()
5632 * For now, we only support interrupts for which the EOI operation in kvmppc_set_passthru_irq()
5634 * what our real-mode EOI code does, or a XIVE interrupt in kvmppc_set_passthru_irq()
5636 chip = irq_data_get_irq_chip(&desc->irq_data); in kvmppc_set_passthru_irq()
5640 mutex_unlock(&kvm->lock); in kvmppc_set_passthru_irq()
5641 return -ENOENT; in kvmppc_set_passthru_irq()
5647 * otherwise re-use this entry. in kvmppc_set_passthru_irq()
5649 for (i = 0; i < pimap->n_mapped; i++) { in kvmppc_set_passthru_irq()
5650 if (guest_gsi == pimap->mapped[i].v_hwirq) { in kvmppc_set_passthru_irq()
5651 if (pimap->mapped[i].r_hwirq) { in kvmppc_set_passthru_irq()
5652 mutex_unlock(&kvm->lock); in kvmppc_set_passthru_irq()
5653 return -EINVAL; in kvmppc_set_passthru_irq()
5660 mutex_unlock(&kvm->lock); in kvmppc_set_passthru_irq()
5661 return -EAGAIN; /* table is full */ in kvmppc_set_passthru_irq()
5664 irq_map = &pimap->mapped[i]; in kvmppc_set_passthru_irq()
5666 irq_map->v_hwirq = guest_gsi; in kvmppc_set_passthru_irq()
5667 irq_map->desc = desc; in kvmppc_set_passthru_irq()
5676 * The 'host_irq' number is mapped in the PCI-MSI domain but in kvmppc_set_passthru_irq()
5677 * the underlying calls, which will EOI the interrupt in real in kvmppc_set_passthru_irq()
5681 irq_map->r_hwirq = (unsigned int)irqd_to_hwirq(host_data); in kvmppc_set_passthru_irq()
5683 if (i == pimap->n_mapped) in kvmppc_set_passthru_irq()
5684 pimap->n_mapped++; in kvmppc_set_passthru_irq()
5689 kvmppc_xics_set_mapped(kvm, guest_gsi, irq_map->r_hwirq); in kvmppc_set_passthru_irq()
5691 irq_map->r_hwirq = 0; in kvmppc_set_passthru_irq()
5693 mutex_unlock(&kvm->lock); in kvmppc_set_passthru_irq()
5709 return -EIO; in kvmppc_clr_passthru_irq()
5711 mutex_lock(&kvm->lock); in kvmppc_clr_passthru_irq()
5712 if (!kvm->arch.pimap) in kvmppc_clr_passthru_irq()
5715 pimap = kvm->arch.pimap; in kvmppc_clr_passthru_irq()
5717 for (i = 0; i < pimap->n_mapped; i++) { in kvmppc_clr_passthru_irq()
5718 if (guest_gsi == pimap->mapped[i].v_hwirq) in kvmppc_clr_passthru_irq()
5722 if (i == pimap->n_mapped) { in kvmppc_clr_passthru_irq()
5723 mutex_unlock(&kvm->lock); in kvmppc_clr_passthru_irq()
5724 return -ENODEV; in kvmppc_clr_passthru_irq()
5730 kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq); in kvmppc_clr_passthru_irq()
5733 pimap->mapped[i].r_hwirq = 0; in kvmppc_clr_passthru_irq()
5740 mutex_unlock(&kvm->lock); in kvmppc_clr_passthru_irq()
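kvmppc_set_passthru_irq() and kvmppc_clr_passthru_irq() maintain a small table of guest_gsi to host hwirq mappings: setting looks for an existing entry to reuse (failing with -EINVAL if it is still live), appends a new one if there is room (-EAGAIN when the table is full), and clearing just zeroes r_hwirq so the slot can be reused later. A simplified model of that table below; the toy_* names and sizes are illustrative, and the locking, irq_chip/EOI-capability checks and XICS/XIVE hooks are omitted.

#include <stdio.h>

#define TOY_PIMAP_MAX 8         /* illustrative table size */

/* One guest interrupt (v_hwirq) mapped to one host hw interrupt (r_hwirq). */
struct toy_irq_map { unsigned int v_hwirq; unsigned int r_hwirq; };
struct toy_pimap   { int n_mapped; struct toy_irq_map mapped[TOY_PIMAP_MAX]; };

/* Map guest_gsi -> host_hwirq: reuse a cleared entry for the same guest_gsi,
 * refuse a double mapping, append otherwise. */
static int pimap_set(struct toy_pimap *p, unsigned int guest_gsi,
                     unsigned int host_hwirq)
{
        int i;

        for (i = 0; i < p->n_mapped; i++) {
                if (p->mapped[i].v_hwirq == guest_gsi) {
                        if (p->mapped[i].r_hwirq)
                                return -1;      /* -EINVAL: already mapped */
                        break;                  /* reuse the cleared slot */
                }
        }
        if (i == TOY_PIMAP_MAX)
                return -2;                      /* -EAGAIN: table is full */

        p->mapped[i].v_hwirq = guest_gsi;
        p->mapped[i].r_hwirq = host_hwirq;
        if (i == p->n_mapped)
                p->n_mapped++;
        return 0;
}

/* Unmap: keep the entry (so the slot can be reused) but clear r_hwirq. */
static int pimap_clr(struct toy_pimap *p, unsigned int guest_gsi)
{
        for (int i = 0; i < p->n_mapped; i++) {
                if (p->mapped[i].v_hwirq == guest_gsi) {
                        p->mapped[i].r_hwirq = 0;
                        return 0;
                }
        }
        return -3;                              /* -ENODEV: no such mapping */
}

int main(void)
{
        struct toy_pimap pimap = { 0 };

        pimap_set(&pimap, 0x1000, 37);
        pimap_clr(&pimap, 0x1000);
        printf("remapping after clear: %d\n", pimap_set(&pimap, 0x1000, 37));
        return 0;
}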
5751 irqfd->producer = prod; in kvmppc_irq_bypass_add_producer_hv()
5753 ret = kvmppc_set_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi); in kvmppc_irq_bypass_add_producer_hv()
5756 prod->irq, irqfd->gsi, ret); in kvmppc_irq_bypass_add_producer_hv()
5768 irqfd->producer = NULL; in kvmppc_irq_bypass_del_producer_hv()
5772 * default external interrupt handling mode - KVM real mode in kvmppc_irq_bypass_del_producer_hv()
5775 ret = kvmppc_clr_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi); in kvmppc_irq_bypass_del_producer_hv()
5778 prod->irq, irqfd->gsi, ret); in kvmppc_irq_bypass_del_producer_hv()
5785 struct kvm *kvm __maybe_unused = filp->private_data; in kvm_arch_vm_ioctl_hv()
5796 r = -EOPNOTSUPP; in kvm_arch_vm_ioctl_hv()
5800 r = -EFAULT; in kvm_arch_vm_ioctl_hv()
5813 r = -EFAULT; in kvm_arch_vm_ioctl_hv()
5823 r = -EFAULT; in kvm_arch_vm_ioctl_hv()
5834 r = -EFAULT; in kvm_arch_vm_ioctl_hv()
5843 r = -ENOTTY; in kvm_arch_vm_ioctl_hv()
5852 * all hcalls that were implemented before the hcall-enabling
5902 return -ENODEV; in kvmhv_configure_mmu()
5905 if (cfg->flags & ~(KVM_PPC_MMUV3_RADIX | KVM_PPC_MMUV3_GTSE)) in kvmhv_configure_mmu()
5906 return -EINVAL; in kvmhv_configure_mmu()
5909 radix = !!(cfg->flags & KVM_PPC_MMUV3_RADIX); in kvmhv_configure_mmu()
5910 if (!!(cfg->process_table & PATB_GR) != radix) in kvmhv_configure_mmu()
5911 return -EINVAL; in kvmhv_configure_mmu()
5914 if ((cfg->process_table & PRTS_MASK) > 24) in kvmhv_configure_mmu()
5915 return -EINVAL; in kvmhv_configure_mmu()
5919 return -EINVAL; in kvmhv_configure_mmu()
5923 return -EINVAL; in kvmhv_configure_mmu()
5925 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_configure_mmu()
5927 if (kvm->arch.mmu_ready) { in kvmhv_configure_mmu()
5928 kvm->arch.mmu_ready = 0; in kvmhv_configure_mmu()
5931 if (atomic_read(&kvm->arch.vcpus_running)) { in kvmhv_configure_mmu()
5932 kvm->arch.mmu_ready = 1; in kvmhv_configure_mmu()
5933 err = -EBUSY; in kvmhv_configure_mmu()
5945 kvm->arch.process_table = cfg->process_table; in kvmhv_configure_mmu()
5948 lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0; in kvmhv_configure_mmu()
5953 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_configure_mmu()
5960 return -EPERM; in kvmhv_enable_nested()
5962 return -ENODEV; in kvmhv_enable_nested()
5964 return -ENODEV; in kvmhv_enable_nested()
5968 kvm->arch.nested_enable = true; in kvmhv_enable_nested()
5975 int rc = -EINVAL; in kvmhv_load_from_eaddr()
5981 rc = -EINVAL; in kvmhv_load_from_eaddr()
5985 if (rc && vcpu->arch.nested) in kvmhv_load_from_eaddr()
5986 rc = -EAGAIN; in kvmhv_load_from_eaddr()
5994 int rc = -EINVAL; in kvmhv_store_to_eaddr()
6000 rc = -EINVAL; in kvmhv_store_to_eaddr()
6004 if (rc && vcpu->arch.nested) in kvmhv_store_to_eaddr()
6005 rc = -EAGAIN; in kvmhv_store_to_eaddr()
6013 vpa->gpa = 0; in unpin_vpa_reset()
6014 vpa->pinned_addr = NULL; in unpin_vpa_reset()
6015 vpa->dirty = false; in unpin_vpa_reset()
6016 vpa->update_pending = 0; in unpin_vpa_reset()
6028 return -EINVAL; in kvmhv_enable_svm()
6030 kvm->arch.svm_enabled = 1; in kvmhv_enable_svm()
6037 * - Release all device pages
6038 * - Issue ucall to terminate the guest on the UV side
6039 * - Unpin the VPA pages.
6040 * - Reinit the partition scoped page tables
6050 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmhv_svm_off()
6053 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_svm_off()
6054 mmu_was_ready = kvm->arch.mmu_ready; in kvmhv_svm_off()
6055 if (kvm->arch.mmu_ready) { in kvmhv_svm_off()
6056 kvm->arch.mmu_ready = 0; in kvmhv_svm_off()
6059 if (atomic_read(&kvm->arch.vcpus_running)) { in kvmhv_svm_off()
6060 kvm->arch.mmu_ready = 1; in kvmhv_svm_off()
6061 ret = -EBUSY; in kvmhv_svm_off()
6066 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmhv_svm_off()
6077 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); in kvmhv_svm_off()
6080 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmhv_svm_off()
6082 ret = uv_svm_terminate(kvm->arch.lpid); in kvmhv_svm_off()
6084 ret = -EINVAL; in kvmhv_svm_off()
6090 * to UV via UV_PAGE_IN before the non-boot vcpus get a in kvmhv_svm_off()
6099 spin_lock(&vcpu->arch.vpa_update_lock); in kvmhv_svm_off()
6100 unpin_vpa_reset(kvm, &vcpu->arch.dtl); in kvmhv_svm_off()
6101 unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow); in kvmhv_svm_off()
6102 unpin_vpa_reset(kvm, &vcpu->arch.vpa); in kvmhv_svm_off()
6103 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmhv_svm_off()
6107 kvm->arch.secure_guest = 0; in kvmhv_svm_off()
6108 kvm->arch.mmu_ready = mmu_was_ready; in kvmhv_svm_off()
6110 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_svm_off()
6117 return -ENODEV; in kvmhv_enable_dawr1()
6121 kvm->arch.dawr1_enabled = true; in kvmhv_enable_dawr1()
6208 if (paca_ptrs[first_cpu]->sibling_subcore_state) in kvm_init_subcore_bitmap()
6215 return -ENOMEM; in kvm_init_subcore_bitmap()
6221 paca_ptrs[cpu]->sibling_subcore_state = in kvm_init_subcore_bitmap()
6238 pr_err("KVM-HV: Host does not support TLBIE\n"); in kvmppc_book3s_init_hv()
6239 return -ENODEV; in kvmppc_book3s_init_hv()
6247 return -ENODEV; in kvmppc_book3s_init_hv()
6261 * either directly, via paca_ptrs[cpu]->kvm_hstate.xics_phys, or in kvmppc_book3s_init_hv()
6266 !local_paca->kvm_hstate.xics_phys) { in kvmppc_book3s_init_hv()
6269 np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc"); in kvmppc_book3s_init_hv()
6271 pr_err("KVM-HV: Cannot determine method for accessing XICS\n"); in kvmppc_book3s_init_hv()
6272 r = -ENODEV; in kvmppc_book3s_init_hv()
6275 /* presence of intc confirmed - node can be dropped again */ in kvmppc_book3s_init_hv()
6296 pr_err("KVM-HV: kvmppc_uvmem_init failed %d\n", r); in kvmppc_book3s_init_hv()