Lines Matching +full:0 +full:xc
91 vcpu->arch.irq_pending = 0; in kvmppc_xive_push_vcpu()
124 /* Now P is 0, we can clear the flag */ in kvmppc_xive_push_vcpu()
125 vcpu->arch.xive_esc_on = 0; in kvmppc_xive_push_vcpu()
144 out_be64(xd->trig_mmio, 0); in xive_irq_trigger()
178 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_attach_escalation() local
179 struct xive_q *q = &xc->queues[prio]; in kvmppc_xive_attach_escalation()
184 if (xc->esc_virq[prio]) in kvmppc_xive_attach_escalation()
185 return 0; in kvmppc_xive_attach_escalation()
188 xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq); in kvmppc_xive_attach_escalation()
189 if (!xc->esc_virq[prio]) { in kvmppc_xive_attach_escalation()
191 prio, xc->server_num); in kvmppc_xive_attach_escalation()
197 vcpu->kvm->arch.lpid, xc->server_num); in kvmppc_xive_attach_escalation()
200 vcpu->kvm->arch.lpid, xc->server_num, prio); in kvmppc_xive_attach_escalation()
203 prio, xc->server_num); in kvmppc_xive_attach_escalation()
208 pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio); in kvmppc_xive_attach_escalation()
210 rc = request_irq(xc->esc_virq[prio], xive_esc_irq, in kvmppc_xive_attach_escalation()
214 prio, xc->server_num); in kvmppc_xive_attach_escalation()
217 xc->esc_virq_names[prio] = name; in kvmppc_xive_attach_escalation()
228 struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]); in kvmppc_xive_attach_escalation()
237 return 0; in kvmppc_xive_attach_escalation()
239 irq_dispose_mapping(xc->esc_virq[prio]); in kvmppc_xive_attach_escalation()
240 xc->esc_virq[prio] = 0; in kvmppc_xive_attach_escalation()
247 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_provision_queue() local
248 struct kvmppc_xive *xive = xc->xive; in xive_provision_queue()
249 struct xive_q *q = &xc->queues[prio]; in xive_provision_queue()
254 return 0; in xive_provision_queue()
260 prio, xc->server_num); in xive_provision_queue()
263 memset(qpage, 0, 1 << xive->q_order); in xive_provision_queue()
267 * queue is fully configured. This is a requirement for prio 0 in xive_provision_queue()
270 * corresponding queue 0 entries in xive_provision_queue()
272 rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage, in xive_provision_queue()
276 prio, xc->server_num); in xive_provision_queue()
291 return 0; in xive_check_provisioning()
300 if (rc == 0 && !xive->single_escalation) in xive_check_provisioning()
310 return 0; in xive_check_provisioning()
316 struct kvmppc_xive_vcpu *xc; in xive_inc_q_pending() local
325 xc = vcpu->arch.xive_vcpu; in xive_inc_q_pending()
326 if (WARN_ON(!xc)) in xive_inc_q_pending()
329 q = &xc->queues[prio]; in xive_inc_q_pending()
335 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_try_pick_queue() local
339 if (WARN_ON(!xc)) in xive_try_pick_queue()
341 if (!xc->valid) in xive_try_pick_queue()
344 q = &xc->queues[prio]; in xive_try_pick_queue()
350 return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY; in xive_try_pick_queue()
365 pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio); in kvmppc_xive_select_target()
369 if (rc == 0) in kvmppc_xive_select_target()
379 if (rc == 0) { in kvmppc_xive_select_target()
381 pr_devel(" found on 0x%x/%d\n", *server, prio); in kvmppc_xive_select_target()
620 int rc = 0; in kvmppc_xive_set_xive()
626 pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n", in kvmppc_xive_set_xive()
682 * we have a valid new priority (new_act_prio is not 0xff) in kvmppc_xive_set_xive()
734 return 0; in kvmppc_xive_get_xive()
752 pr_devel("int_on(irq=0x%x)\n", irq); in kvmppc_xive_int_on()
762 /* If saved_priority is 0xff, do nothing */ in kvmppc_xive_int_on()
764 return 0; in kvmppc_xive_int_on()
773 return 0; in kvmppc_xive_int_on()
791 pr_devel("int_off(irq=0x%x)\n", irq); in kvmppc_xive_int_off()
799 return 0; in kvmppc_xive_int_off()
826 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_get_icp() local
828 if (!xc) in kvmppc_xive_get_icp()
829 return 0; in kvmppc_xive_get_icp()
832 return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT | in kvmppc_xive_get_icp()
833 (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT | in kvmppc_xive_get_icp()
834 (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT; in kvmppc_xive_get_icp()
839 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_set_icp() local
844 if (!xc || !xive) in kvmppc_xive_set_icp()
853 pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n", in kvmppc_xive_set_icp()
854 xc->server_num, cppr, mfrr, xisr); in kvmppc_xive_set_icp()
866 xc->hw_cppr = xc->cppr = cppr; in kvmppc_xive_set_icp()
869 * Update MFRR state. If it's not 0xff, we mark the VCPU as in kvmppc_xive_set_icp()
874 xc->mfrr = mfrr; in kvmppc_xive_set_icp()
876 xive_irq_trigger(&xc->vp_ipi_data); in kvmppc_xive_set_icp()
888 xc->delayed_irq = xisr; in kvmppc_xive_set_icp()
893 return 0; in kvmppc_xive_set_icp()
912 pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n",guest_irq, hw_irq); in kvmppc_xive_set_mapped()
964 * mask the interrupt in a lossy way (act_priority is 0xff) in kvmppc_xive_set_mapped()
989 return 0; in kvmppc_xive_set_mapped()
1007 pr_devel("clr_mapped girq 0x%lx...\n", guest_irq); in kvmppc_xive_clr_mapped()
1039 state->pt_number = 0; in kvmppc_xive_clr_mapped()
1070 return 0; in kvmppc_xive_clr_mapped()
1076 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_disable_vcpu_interrupts() local
1081 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_disable_vcpu_interrupts()
1086 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) { in kvmppc_xive_disable_vcpu_interrupts()
1093 if (state->act_server != xc->server_num) in kvmppc_xive_disable_vcpu_interrupts()
1100 xive_native_configure_irq(state->ipi_number, 0, MASKED, 0); in kvmppc_xive_disable_vcpu_interrupts()
1103 xive_native_configure_irq(state->pt_number, 0, MASKED, 0); in kvmppc_xive_disable_vcpu_interrupts()
1121 vcpu->arch.xive_esc_vaddr = 0; in kvmppc_xive_disable_vcpu_interrupts()
1122 vcpu->arch.xive_esc_raddr = 0; in kvmppc_xive_disable_vcpu_interrupts()
1134 struct kvmppc_xive_vcpu *xc, int irq) in xive_cleanup_single_escalation() argument
1152 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_cleanup_vcpu() local
1159 if (!xc) in kvmppc_xive_cleanup_vcpu()
1162 pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num); in kvmppc_xive_cleanup_vcpu()
1165 xc->valid = false; in kvmppc_xive_cleanup_vcpu()
1169 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01); in kvmppc_xive_cleanup_vcpu()
1172 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { in kvmppc_xive_cleanup_vcpu()
1173 if (xc->esc_virq[i]) { in kvmppc_xive_cleanup_vcpu()
1174 if (xc->xive->single_escalation) in kvmppc_xive_cleanup_vcpu()
1175 xive_cleanup_single_escalation(vcpu, xc, in kvmppc_xive_cleanup_vcpu()
1176 xc->esc_virq[i]); in kvmppc_xive_cleanup_vcpu()
1177 free_irq(xc->esc_virq[i], vcpu); in kvmppc_xive_cleanup_vcpu()
1178 irq_dispose_mapping(xc->esc_virq[i]); in kvmppc_xive_cleanup_vcpu()
1179 kfree(xc->esc_virq_names[i]); in kvmppc_xive_cleanup_vcpu()
1184 xive_native_disable_vp(xc->vp_id); in kvmppc_xive_cleanup_vcpu()
1187 vcpu->arch.xive_cam_word = 0; in kvmppc_xive_cleanup_vcpu()
1190 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { in kvmppc_xive_cleanup_vcpu()
1191 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_cleanup_vcpu()
1193 xive_native_disable_queue(xc->vp_id, q, i); in kvmppc_xive_cleanup_vcpu()
1202 if (xc->vp_ipi) { in kvmppc_xive_cleanup_vcpu()
1203 xive_cleanup_irq_data(&xc->vp_ipi_data); in kvmppc_xive_cleanup_vcpu()
1204 xive_native_free_irq(xc->vp_ipi); in kvmppc_xive_cleanup_vcpu()
1207 kfree(xc); in kvmppc_xive_cleanup_vcpu()
1247 return 0; in kvmppc_xive_compute_vp_id()
1254 struct kvmppc_xive_vcpu *xc; in kvmppc_xive_connect_vcpu() local
1276 xc = kzalloc(sizeof(*xc), GFP_KERNEL); in kvmppc_xive_connect_vcpu()
1277 if (!xc) { in kvmppc_xive_connect_vcpu()
1282 vcpu->arch.xive_vcpu = xc; in kvmppc_xive_connect_vcpu()
1283 xc->xive = xive; in kvmppc_xive_connect_vcpu()
1284 xc->vcpu = vcpu; in kvmppc_xive_connect_vcpu()
1285 xc->server_num = cpu; in kvmppc_xive_connect_vcpu()
1286 xc->vp_id = vp_id; in kvmppc_xive_connect_vcpu()
1287 xc->mfrr = 0xff; in kvmppc_xive_connect_vcpu()
1288 xc->valid = true; in kvmppc_xive_connect_vcpu()
1290 r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id); in kvmppc_xive_connect_vcpu()
1295 vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000); in kvmppc_xive_connect_vcpu()
1296 vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO); in kvmppc_xive_connect_vcpu()
1299 xc->vp_ipi = xive_native_alloc_irq(); in kvmppc_xive_connect_vcpu()
1300 if (!xc->vp_ipi) { in kvmppc_xive_connect_vcpu()
1305 pr_devel(" IPI=0x%x\n", xc->vp_ipi); in kvmppc_xive_connect_vcpu()
1307 r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data); in kvmppc_xive_connect_vcpu()
1315 r = xive_native_enable_vp(xc->vp_id, xive->single_escalation); in kvmppc_xive_connect_vcpu()
1323 * and we enable escalation for queue 0 only which we'll use for in kvmppc_xive_connect_vcpu()
1328 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { in kvmppc_xive_connect_vcpu()
1329 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_connect_vcpu()
1338 if (r == 0 && !xive->single_escalation) in kvmppc_xive_connect_vcpu()
1344 r = xive_native_configure_queue(xc->vp_id, in kvmppc_xive_connect_vcpu()
1345 q, i, NULL, 0, true); in kvmppc_xive_connect_vcpu()
1354 /* If not done above, attach priority 0 escalation */ in kvmppc_xive_connect_vcpu()
1355 r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation); in kvmppc_xive_connect_vcpu()
1360 r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI); in kvmppc_xive_connect_vcpu()
1362 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00); in kvmppc_xive_connect_vcpu()
1372 return 0; in kvmppc_xive_connect_vcpu()
1392 pr_err("invalid irq 0x%x in cpu queue!\n", irq); in xive_pre_save_set_queued()
1402 pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq); in xive_pre_save_set_queued()
1474 for (i = 0; i <= xive->max_sbid; i++) { in xive_pre_save_scan()
1478 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) in xive_pre_save_scan()
1484 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_pre_save_scan() local
1485 if (!xc) in xive_pre_save_scan()
1487 for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) { in xive_pre_save_scan()
1488 if (xc->queues[j].qpage) in xive_pre_save_scan()
1489 xive_pre_save_queue(xive, &xc->queues[j]); in xive_pre_save_scan()
1494 for (i = 0; i <= xive->max_sbid; i++) { in xive_pre_save_scan()
1498 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) in xive_pre_save_scan()
1508 for (i = 0; i <= xive->max_sbid; i++) { in xive_post_save_scan()
1512 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) in xive_post_save_scan()
1517 xive->saved_src_count = 0; in xive_post_save_scan()
1558 if (xive->saved_src_count == 0) in xive_get_source()
1603 return 0; in xive_get_source()
1627 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { in kvmppc_xive_create_src_block()
1629 sb->irq_state[i].eisn = 0; in kvmppc_xive_create_src_block()
1652 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_check_delayed_irq() local
1654 if (!xc) in xive_check_delayed_irq()
1657 if (xc->delayed_irq == irq) { in xive_check_delayed_irq()
1658 xc->delayed_irq = 0; in xive_check_delayed_irq()
1675 int rc = 0; in xive_set_source()
1680 pr_devel("set_source(irq=0x%lx)\n", irq); in xive_set_source()
1703 pr_devel(" val=0x016%llx (server=0x%x, guest_prio=%d)\n", in xive_set_source()
1712 if (state->ipi_number == 0) { in xive_set_source()
1717 pr_devel(" src_ipi=0x%x\n", state->ipi_number); in xive_set_source()
1725 * 0 before calling it to ensure it actually performs the masking. in xive_set_source()
1727 state->guest_priority = 0; in xive_set_source()
1756 if (rc == 0) in xive_set_source()
1826 return 0; in xive_set_source()
1855 else if (level == 0 || level == KVM_INTERRUPT_UNSET) { in kvmppc_xive_set_irq()
1856 state->asserted = 0; in kvmppc_xive_set_irq()
1857 return 0; in kvmppc_xive_set_irq()
1863 return 0; in kvmppc_xive_set_irq()
1870 int rc = 0; in kvmppc_xive_set_nr_servers()
1939 return 0; in xive_has_attr()
1944 return 0; in xive_has_attr()
1953 xive_native_configure_irq(hw_num, 0, MASKED, 0); in kvmppc_xive_cleanup_irq()
1960 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { in kvmppc_xive_free_sources()
2027 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_release()
2067 memset(xive, 0, sizeof(*xive)); in kvmppc_xive_get_device()
2099 xive->q_page_order = 0; in kvmppc_xive_create()
2113 return 0; in kvmppc_xive_create()
2118 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_debug_show_queues() local
2121 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { in kvmppc_xive_debug_show_queues()
2122 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_debug_show_queues()
2125 if (!q->qpage && !xc->esc_virq[i]) in kvmppc_xive_debug_show_queues()
2138 if (xc->esc_virq[i]) { in kvmppc_xive_debug_show_queues()
2139 struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]); in kvmppc_xive_debug_show_queues()
2147 xc->esc_virq[i], pq, xd->eoi_page); in kvmppc_xive_debug_show_queues()
2151 return 0; in kvmppc_xive_debug_show_queues()
2159 u64 t_rm_h_xirr = 0; in xive_debug_show()
2160 u64 t_rm_h_ipoll = 0; in xive_debug_show()
2161 u64 t_rm_h_cppr = 0; in xive_debug_show()
2162 u64 t_rm_h_eoi = 0; in xive_debug_show()
2163 u64 t_rm_h_ipi = 0; in xive_debug_show()
2164 u64 t_vm_h_xirr = 0; in xive_debug_show()
2165 u64 t_vm_h_ipoll = 0; in xive_debug_show()
2166 u64 t_vm_h_cppr = 0; in xive_debug_show()
2167 u64 t_vm_h_eoi = 0; in xive_debug_show()
2168 u64 t_vm_h_ipi = 0; in xive_debug_show()
2172 return 0; in xive_debug_show()
2177 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_debug_show() local
2179 if (!xc) in xive_debug_show()
2184 xc->server_num, xc->vp_id, xc->cppr, xc->hw_cppr, in xive_debug_show()
2185 xc->mfrr, xc->pending, in xive_debug_show()
2186 xc->stat_rm_h_xirr, xc->stat_vm_h_xirr); in xive_debug_show()
2190 t_rm_h_xirr += xc->stat_rm_h_xirr; in xive_debug_show()
2191 t_rm_h_ipoll += xc->stat_rm_h_ipoll; in xive_debug_show()
2192 t_rm_h_cppr += xc->stat_rm_h_cppr; in xive_debug_show()
2193 t_rm_h_eoi += xc->stat_rm_h_eoi; in xive_debug_show()
2194 t_rm_h_ipi += xc->stat_rm_h_ipi; in xive_debug_show()
2195 t_vm_h_xirr += xc->stat_vm_h_xirr; in xive_debug_show()
2196 t_vm_h_ipoll += xc->stat_vm_h_ipoll; in xive_debug_show()
2197 t_vm_h_cppr += xc->stat_vm_h_cppr; in xive_debug_show()
2198 t_vm_h_eoi += xc->stat_vm_h_eoi; in xive_debug_show()
2199 t_vm_h_ipi += xc->stat_vm_h_ipi; in xive_debug_show()
2209 return 0; in xive_debug_show()