Lines Matching refs:xc
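
All of the matches below dereference the per-vCPU XIVE state, vcpu->arch.xive_vcpu (the file appears to be arch/powerpc/kvm/book3s_xive.c). As a reading aid, here is a rough sketch of that structure, reconstructed only from the fields referenced in this listing; exact types, field order and array bounds are assumptions, see arch/powerpc/kvm/book3s_xive.h for the real definition:

    struct kvmppc_xive_vcpu {
        struct kvmppc_xive   *xive;          /* owning XIVE device (xc->xive) */
        struct kvm_vcpu      *vcpu;          /* back-pointer to the vCPU */
        bool                 valid;          /* cleared first during teardown */

        u32                  server_num;     /* guest-visible server number */
        u32                  vp_id;          /* HW virtual processor id */
        u32                  vp_cam;         /* CAM word pushed to the thread ctx */
        u32                  vp_chip_id;

        u32                  vp_ipi;         /* HW irq number of the VP IPI */
        struct xive_irq_data vp_ipi_data;    /* ESB data for that IPI */

        u8                   cppr, hw_cppr;  /* emulated vs. HW-synced priority */
        u8                   mfrr;           /* IPI priority, 0xff = none */
        u8                   pending;
        u32                  delayed_irq;    /* XISR stashed across migration */

        struct xive_q        queues[KVMPPC_XIVE_Q_COUNT];
        int                  esc_virq[KVMPPC_XIVE_Q_COUNT];   /* escalation virqs */
        char                 *esc_virq_names[KVMPPC_XIVE_Q_COUNT];

        u64                  stat_rm_h_xirr, stat_vm_h_xirr;  /* hcall counters, */
        u64                  stat_rm_h_ipi, stat_vm_h_ipi;    /* etc. */
    };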

108 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_attach_escalation() local
109 struct xive_q *q = &xc->queues[prio]; in xive_attach_escalation()
114 if (xc->esc_virq[prio]) in xive_attach_escalation()
118 xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq); in xive_attach_escalation()
119 if (!xc->esc_virq[prio]) { in xive_attach_escalation()
121 prio, xc->server_num); in xive_attach_escalation()
125 if (xc->xive->single_escalation) in xive_attach_escalation()
127 vcpu->kvm->arch.lpid, xc->server_num); in xive_attach_escalation()
130 vcpu->kvm->arch.lpid, xc->server_num, prio); in xive_attach_escalation()
133 prio, xc->server_num); in xive_attach_escalation()
138 pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio); in xive_attach_escalation()
140 rc = request_irq(xc->esc_virq[prio], xive_esc_irq, in xive_attach_escalation()
144 prio, xc->server_num); in xive_attach_escalation()
147 xc->esc_virq_names[prio] = name; in xive_attach_escalation()
157 if (xc->xive->single_escalation) { in xive_attach_escalation()
158 struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]); in xive_attach_escalation()
169 irq_dispose_mapping(xc->esc_virq[prio]); in xive_attach_escalation()
170 xc->esc_virq[prio] = 0; in xive_attach_escalation()
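
The references above (lines 108-170) cover the whole escalation-attach path. A condensed sketch of the flow they imply; names and arguments not visible in the listing (the kasprintf format, IRQF flags, error codes) are guesses:

    static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
    {
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct xive_q *q = &xc->queues[prio];
        char *name;
        int rc;

        if (xc->esc_virq[prio])                 /* already attached (line 114) */
            return 0;

        /* Map the HW escalation interrupt to a Linux virq (line 118) */
        xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
        if (!xc->esc_virq[prio])
            return -EIO;

        /* Name carries lpid + server, plus prio unless single escalation
         * (lines 125-133) */
        name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
                         vcpu->kvm->arch.lpid, xc->server_num, prio);
        if (!name) {
            rc = -ENOMEM;
            goto error;
        }

        rc = request_irq(xc->esc_virq[prio], xive_esc_irq, IRQF_NO_THREAD,
                         name, vcpu);
        if (rc)
            goto error;
        xc->esc_virq_names[prio] = name;

        /* single_escalation: lines 157-158 additionally fetch the virq's
         * irq_data to adjust the escalation's ESB state */
        return 0;

    error:
        irq_dispose_mapping(xc->esc_virq[prio]);   /* lines 169-170 */
        xc->esc_virq[prio] = 0;
        kfree(name);
        return rc;
    }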
177 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_provision_queue() local
178 struct kvmppc_xive *xive = xc->xive; in xive_provision_queue()
179 struct xive_q *q = &xc->queues[prio]; in xive_provision_queue()
190 prio, xc->server_num); in xive_provision_queue()
202 rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage, in xive_provision_queue()
206 prio, xc->server_num); in xive_provision_queue()
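
A plausible reconstruction of the provisioning step shown above; the allocation flags, the q_page_order/q_order fields and the final "can escalate" argument to xive_native_configure_queue() are assumptions:

    static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
    {
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct kvmppc_xive *xive = xc->xive;
        struct xive_q *q = &xc->queues[prio];
        void *qpage;
        int rc;

        if (WARN_ON(q->qpage))                  /* already provisioned */
            return 0;

        /* Allocate and zero the event-queue page(s) (around line 190) */
        qpage = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                         xive->q_page_order);
        if (!qpage) {
            pr_err("Failed to allocate queue %d for VCPU %d\n",
                   prio, xc->server_num);
            return -ENOMEM;
        }

        /* Hand the page to the HW queue descriptor (line 202) */
        rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
                                         xive->q_order, true);
        if (rc)
            pr_err("Failed to configure queue %d for VCPU %d\n",
                   prio, xc->server_num);
        return rc;
    }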
245 struct kvmppc_xive_vcpu *xc; in xive_inc_q_pending() local
254 xc = vcpu->arch.xive_vcpu; in xive_inc_q_pending()
255 if (WARN_ON(!xc)) in xive_inc_q_pending()
258 q = &xc->queues[prio]; in xive_inc_q_pending()
264 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_try_pick_queue() local
268 if (WARN_ON(!xc)) in xive_try_pick_queue()
270 if (!xc->valid) in xive_try_pick_queue()
273 q = &xc->queues[prio]; in xive_try_pick_queue()
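
Both helpers above only touch xc to validate the target vCPU and index into its queue array; the queue accounting that follows is not part of this listing. A minimal sketch of the shared pattern, factored into a hypothetical helper (xive_check_target is not a real function in this file):

    static int xive_check_target(struct kvm_vcpu *vcpu, u8 prio,
                                 struct xive_q **out_q)
    {
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct xive_q *q;

        if (WARN_ON(!xc))           /* vCPU never connected to XIVE */
            return -ENXIO;
        if (!xc->valid)             /* connected, but being torn down */
            return -ENXIO;

        q = &xc->queues[prio];      /* per-priority event queue */
        if (!q->qpage)              /* queue not provisioned */
            return -ENXIO;

        *out_q = q;
        return 0;
    }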
757 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_get_icp() local
759 if (!xc) in kvmppc_xive_get_icp()
763 return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT | in kvmppc_xive_get_icp()
764 (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT | in kvmppc_xive_get_icp()
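
These two lines belong to a single packed return value: the emulated ICP state is folded into one u64 for KVM_REG_PPC_ICP save/restore. The third term of the expression (the pending priority) does not mention xc and so is absent from this listing; a sketch of the full expression, assuming 0xff is the "nothing pending" value:

    /* cppr / mfrr / pending-pri packed into the KVM_REG_PPC_ICP word */
    return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
           (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
           (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;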
770 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_set_icp() local
775 if (!xc || !xive) in kvmppc_xive_set_icp()
785 xc->server_num, cppr, mfrr, xisr); in kvmppc_xive_set_icp()
796 xc->hw_cppr = xc->cppr = cppr; in kvmppc_xive_set_icp()
804 xc->mfrr = mfrr; in kvmppc_xive_set_icp()
806 xive_irq_trigger(&xc->vp_ipi_data); in kvmppc_xive_set_icp()
818 xc->delayed_irq = xisr; in kvmppc_xive_set_icp()
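
Pieced together, the set_icp references above describe the restore side of migration. A condensed sketch; the field extraction, the IPI re-trigger condition and the XICS_IPI filter are assumptions consistent with the listing:

    int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
    {
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
        u8 cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
        u8 mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
        u32 xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
                   KVM_REG_PPC_ICP_XISR_MASK;

        if (!xc || !xive)                       /* line 775 */
            return -ENOENT;

        /* Restore the priority, both the emulated copy and the value the
         * HW context will be loaded with (line 796) */
        xc->hw_cppr = xc->cppr = cppr;

        /* Restore a pending IPI by re-firing the VP IPI through its ESB
         * pages (lines 804-806) */
        xc->mfrr = mfrr;
        if (mfrr < cppr)
            xive_irq_trigger(&xc->vp_ipi_data);

        /* A saved XISR means a non-IPI source was in flight; stash it so
         * it can be replayed once that source is restored (line 818) */
        if (xisr > XICS_IPI)
            xc->delayed_irq = xisr;

        return 0;
    }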
991 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_disable_vcpu_interrupts() local
1008 if (state->act_server != xc->server_num) in kvmppc_xive_disable_vcpu_interrupts()
1027 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_cleanup_vcpu() local
1028 struct kvmppc_xive *xive = xc->xive; in kvmppc_xive_cleanup_vcpu()
1031 pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num); in kvmppc_xive_cleanup_vcpu()
1034 xc->valid = false; in kvmppc_xive_cleanup_vcpu()
1038 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01); in kvmppc_xive_cleanup_vcpu()
1041 xive_native_disable_vp(xc->vp_id); in kvmppc_xive_cleanup_vcpu()
1045 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_cleanup_vcpu()
1048 if (xc->esc_virq[i]) { in kvmppc_xive_cleanup_vcpu()
1049 free_irq(xc->esc_virq[i], vcpu); in kvmppc_xive_cleanup_vcpu()
1050 irq_dispose_mapping(xc->esc_virq[i]); in kvmppc_xive_cleanup_vcpu()
1051 kfree(xc->esc_virq_names[i]); in kvmppc_xive_cleanup_vcpu()
1054 xive_native_disable_queue(xc->vp_id, q, i); in kvmppc_xive_cleanup_vcpu()
1063 if (xc->vp_ipi) { in kvmppc_xive_cleanup_vcpu()
1064 xive_cleanup_irq_data(&xc->vp_ipi_data); in kvmppc_xive_cleanup_vcpu()
1065 xive_native_free_irq(xc->vp_ipi); in kvmppc_xive_cleanup_vcpu()
1068 kfree(xc); in kvmppc_xive_cleanup_vcpu()
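
Read top to bottom, the cleanup references give the full teardown ordering. A condensed sketch (the freeing of queue pages between lines 1054 and 1063 is elided):

    void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
    {
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        int i;

        /* 1. Stop routing new interrupts to this VP (line 1034) */
        xc->valid = false;

        /* 2. Mask the VP IPI by setting its ESB to PQ=01 (line 1038) */
        xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);

        /* 3. Disable the HW virtual processor (line 1041) */
        xive_native_disable_vp(xc->vp_id);

        /* 4. Free each escalation, then disable its queue */
        for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
            struct xive_q *q = &xc->queues[i];

            if (xc->esc_virq[i]) {
                free_irq(xc->esc_virq[i], vcpu);
                irq_dispose_mapping(xc->esc_virq[i]);
                kfree(xc->esc_virq_names[i]);
            }
            xive_native_disable_queue(xc->vp_id, q, i);
        }

        /* 5. Release the VP IPI and its ESB data (lines 1063-1065) */
        if (xc->vp_ipi) {
            xive_cleanup_irq_data(&xc->vp_ipi_data);
            xive_native_free_irq(xc->vp_ipi);
        }

        /* 6. Finally free the per-vCPU state itself (line 1068) */
        kfree(xc);
    }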
1075 struct kvmppc_xive_vcpu *xc; in kvmppc_xive_connect_vcpu() local
1096 xc = kzalloc(sizeof(*xc), GFP_KERNEL); in kvmppc_xive_connect_vcpu()
1097 if (!xc) in kvmppc_xive_connect_vcpu()
1102 vcpu->arch.xive_vcpu = xc; in kvmppc_xive_connect_vcpu()
1103 xc->xive = xive; in kvmppc_xive_connect_vcpu()
1104 xc->vcpu = vcpu; in kvmppc_xive_connect_vcpu()
1105 xc->server_num = cpu; in kvmppc_xive_connect_vcpu()
1106 xc->vp_id = xive_vp(xive, cpu); in kvmppc_xive_connect_vcpu()
1107 xc->mfrr = 0xff; in kvmppc_xive_connect_vcpu()
1108 xc->valid = true; in kvmppc_xive_connect_vcpu()
1110 r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id); in kvmppc_xive_connect_vcpu()
1116 vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO); in kvmppc_xive_connect_vcpu()
1119 xc->vp_ipi = xive_native_alloc_irq(); in kvmppc_xive_connect_vcpu()
1120 if (!xc->vp_ipi) { in kvmppc_xive_connect_vcpu()
1125 pr_devel(" IPI=0x%x\n", xc->vp_ipi); in kvmppc_xive_connect_vcpu()
1127 r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data); in kvmppc_xive_connect_vcpu()
1135 r = xive_native_enable_vp(xc->vp_id, xive->single_escalation); in kvmppc_xive_connect_vcpu()
1149 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_connect_vcpu()
1163 r = xive_native_configure_queue(xc->vp_id, in kvmppc_xive_connect_vcpu()
1179 r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI); in kvmppc_xive_connect_vcpu()
1181 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00); in kvmppc_xive_connect_vcpu()
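
The connect path above is the mirror image of the cleanup. A condensed sketch of the ordering it implies; locking, the queue-provisioning loop (lines 1149-1163) and most error unwinding are omitted, and the kvm_device signature is an assumption:

    int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
                                 struct kvm_vcpu *vcpu, u32 cpu)
    {
        struct kvmppc_xive *xive = dev->private;
        struct kvmppc_xive_vcpu *xc;
        int r;

        xc = kzalloc(sizeof(*xc), GFP_KERNEL);
        if (!xc)
            return -ENOMEM;

        /* Identity: guest server number and the HW VP backing it */
        vcpu->arch.xive_vcpu = xc;
        xc->xive = xive;
        xc->vcpu = vcpu;
        xc->server_num = cpu;
        xc->vp_id = xive_vp(xive, cpu);
        xc->mfrr = 0xff;                        /* no IPI pending */
        xc->valid = true;

        /* CAM word the vCPU pushes into the thread context (line 1116) */
        r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
        if (r)
            goto bail;
        vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);

        /* Allocate the VP IPI and fetch its ESB data (lines 1119-1127) */
        xc->vp_ipi = xive_native_alloc_irq();
        if (!xc->vp_ipi) {
            r = -EIO;
            goto bail;
        }
        r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
        if (r)
            goto bail;

        /* Enable the VP, then route the IPI to it at priority 0 as
         * XICS_IPI and unmask it with PQ=00 (lines 1135-1181) */
        r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
        if (r)
            goto bail;
        r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
        if (!r)
            xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);

    bail:
        if (r)
            kvmppc_xive_cleanup_vcpu(vcpu);
        return r;
    }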
1303 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_pre_save_scan() local
1304 if (!xc) in xive_pre_save_scan()
1307 if (xc->queues[j].qpage) in xive_pre_save_scan()
1308 xive_pre_save_queue(xive, &xc->queues[j]); in xive_pre_save_scan()
1471 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_check_delayed_irq() local
1473 if (!xc) in xive_check_delayed_irq()
1476 if (xc->delayed_irq == irq) { in xive_check_delayed_irq()
1477 xc->delayed_irq = 0; in xive_check_delayed_irq()
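
The delayed_irq stashed by set_icp is consumed here: once the matching source is restored, the stash is cleared so the interrupt can be resent. A minimal sketch of the scan (any per-device delayed-irq counter bookkeeping is omitted):

    static void xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
    {
        struct kvm_vcpu *vcpu;
        int i;

        kvm_for_each_vcpu(i, vcpu, xive->kvm) {
            struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

            if (!xc)                    /* line 1473 */
                continue;

            /* This vCPU had the source in flight at save time */
            if (xc->delayed_irq == irq) /* lines 1476-1477 */
                xc->delayed_irq = 0;
        }
    }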
1847 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_debug_show() local
1850 if (!xc) in xive_debug_show()
1855 xc->server_num, xc->cppr, xc->hw_cppr, in xive_debug_show()
1856 xc->mfrr, xc->pending, in xive_debug_show()
1857 xc->stat_rm_h_xirr, xc->stat_vm_h_xirr); in xive_debug_show()
1859 struct xive_q *q = &xc->queues[i]; in xive_debug_show()
1862 if (!q->qpage && !xc->esc_virq[i]) in xive_debug_show()
1874 if (xc->esc_virq[i]) { in xive_debug_show()
1875 struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]); in xive_debug_show()
1881 xc->esc_virq[i], pq, xd->eoi_page); in xive_debug_show()
1886 t_rm_h_xirr += xc->stat_rm_h_xirr; in xive_debug_show()
1887 t_rm_h_ipoll += xc->stat_rm_h_ipoll; in xive_debug_show()
1888 t_rm_h_cppr += xc->stat_rm_h_cppr; in xive_debug_show()
1889 t_rm_h_eoi += xc->stat_rm_h_eoi; in xive_debug_show()
1890 t_rm_h_ipi += xc->stat_rm_h_ipi; in xive_debug_show()
1891 t_vm_h_xirr += xc->stat_vm_h_xirr; in xive_debug_show()
1892 t_vm_h_ipoll += xc->stat_vm_h_ipoll; in xive_debug_show()
1893 t_vm_h_cppr += xc->stat_vm_h_cppr; in xive_debug_show()
1894 t_vm_h_eoi += xc->stat_vm_h_eoi; in xive_debug_show()
1895 t_vm_h_ipi += xc->stat_vm_h_ipi; in xive_debug_show()