Lines Matching +full:0 +full:xd
33 #define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio)) argument
34 #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio)) argument
63 cppr = ack & 0xff; in xive_vm_ack_pending()
80 static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset) in xive_vm_esb_load() argument
84 if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI) in xive_vm_esb_load()
87 val = __raw_readq(__x_eoi_page(xd) + offset); in xive_vm_esb_load()
95 static void xive_vm_source_eoi(u32 hw_irq, struct xive_irq_data *xd) in xive_vm_source_eoi() argument
98 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) in xive_vm_source_eoi()
99 __raw_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI); in xive_vm_source_eoi()
100 else if (xd->flags & XIVE_IRQ_FLAG_LSI) { in xive_vm_source_eoi()
106 __raw_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI); in xive_vm_source_eoi()
119 eoi_val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_00); in xive_vm_source_eoi()
122 if ((eoi_val & 1) && __x_trig_page(xd)) in xive_vm_source_eoi()
123 __raw_writeq(0, __x_trig_page(xd)); in xive_vm_source_eoi()
136 u32 hirq = 0; in xive_vm_scan_interrupts()
137 u8 prio = 0xff; in xive_vm_scan_interrupts()
140 while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) { in xive_vm_scan_interrupts()
146 * If pending is 0 this will return 0xff which is what in xive_vm_scan_interrupts()
175 * Try to fetch from the queue. Will return 0 for a in xive_vm_scan_interrupts()
176 * non-queueing priority (ie, qpage = 0). in xive_vm_scan_interrupts()
186 * We also need to do that if prio is 0 and we had no in xive_vm_scan_interrupts()
194 if (hirq == XICS_IPI || (prio == 0 && !qpage)) { in xive_vm_scan_interrupts()
220 int p = atomic_xchg(&q->pending_count, 0); in xive_vm_scan_interrupts()
267 * loop will only exit with hirq != 0 if prio is lower than in xive_vm_scan_interrupts()
275 * as the HW interrupt we use for IPIs is routed to priority 0. in xive_vm_scan_interrupts()
300 pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n", in xive_vm_h_xirr()
309 pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n", in xive_vm_h_xirr()
313 if (hirq & 0xff000000) in xive_vm_h_xirr()
314 pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq); in xive_vm_h_xirr()
324 * hirq = 0; in xive_vm_h_xirr()
354 pending = 0xff; in xive_vm_h_ipoll()
358 u8 pipr = be64_to_cpu(qw1) & 0xff; in xive_vm_h_ipoll()
377 if (xc->mfrr != 0xff) { in xive_vm_push_pending_to_hw()
381 pending |= 0x80; in xive_vm_push_pending_to_hw()
401 struct xive_irq_data *xd; in xive_vm_scan_for_rerouted_irqs() local
418 irq = entry & 0x7fffffff; in xive_vm_scan_for_rerouted_irqs()
436 qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY); in xive_vm_scan_for_rerouted_irqs()
439 kvmppc_xive_select_irq(state, &hw_num, &xd); in xive_vm_scan_for_rerouted_irqs()
442 if (!(xd->flags & XIVE_IRQ_FLAG_LSI)) in xive_vm_scan_for_rerouted_irqs()
443 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11); in xive_vm_scan_for_rerouted_irqs()
446 xive_vm_source_eoi(hw_num, xd); in xive_vm_scan_for_rerouted_irqs()
450 if (idx == 0) in xive_vm_scan_for_rerouted_irqs()
521 struct xive_irq_data *xd; in xive_vm_h_eoi() local
523 u32 irq = xirr & 0x00ffffff, hw_num; in xive_vm_h_eoi()
525 int rc = 0; in xive_vm_h_eoi()
539 if (irq == XICS_IPI || irq == 0) { in xive_vm_h_eoi()
559 kvmppc_xive_select_irq(state, &hw_num, &xd); in xive_vm_h_eoi()
588 xive_vm_source_eoi(hw_num, xd); in xive_vm_h_eoi()
592 __raw_writeq(0, __x_trig_page(xd)); in xive_vm_h_eoi()
652 __raw_writeq(0, __x_trig_page(&xc->vp_ipi_data)); in xive_vm_h_ipi()
713 vcpu->arch.irq_pending = 0; in kvmppc_xive_push_vcpu()
746 /* Now P is 0, we can clear the flag */ in kvmppc_xive_push_vcpu()
747 vcpu->arch.xive_esc_on = 0; in kvmppc_xive_push_vcpu()
772 /* Second load to recover the context state (Words 0 and 1) */ in kvmppc_xive_pull_vcpu()
777 vcpu->arch.xive_saved_state.lsmfb = 0; in kvmppc_xive_pull_vcpu()
778 vcpu->arch.xive_saved_state.ack = 0xff; in kvmppc_xive_pull_vcpu()
779 vcpu->arch.xive_pushed = 0; in kvmppc_xive_pull_vcpu()
825 static bool xive_irq_trigger(struct xive_irq_data *xd) in xive_irq_trigger() argument
828 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI)) in xive_irq_trigger()
832 if (WARN_ON(!xd->trig_mmio)) in xive_irq_trigger()
835 out_be64(xd->trig_mmio, 0); in xive_irq_trigger()
876 return 0; in kvmppc_xive_attach_escalation()
920 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in kvmppc_xive_attach_escalation() local
922 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01); in kvmppc_xive_attach_escalation()
923 vcpu->arch.xive_esc_raddr = xd->eoi_page; in kvmppc_xive_attach_escalation()
924 vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio; in kvmppc_xive_attach_escalation()
925 xd->flags |= XIVE_IRQ_FLAG_NO_EOI; in kvmppc_xive_attach_escalation()
928 return 0; in kvmppc_xive_attach_escalation()
931 xc->esc_virq[prio] = 0; in kvmppc_xive_attach_escalation()
945 return 0; in xive_provision_queue()
954 memset(qpage, 0, 1 << xive->q_order); in xive_provision_queue()
958 * queue is fully configured. This is a requirement for prio 0 in xive_provision_queue()
961 * corresponding queue 0 entries in xive_provision_queue()
983 return 0; in xive_check_provisioning()
992 if (rc == 0 && !kvmppc_xive_has_single_escalation(xive)) in xive_check_provisioning()
1002 return 0; in xive_check_provisioning()
1042 return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY; in xive_try_pick_queue()
1058 pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio); in kvmppc_xive_select_target()
1062 if (rc == 0) in kvmppc_xive_select_target()
1072 if (rc == 0) { in kvmppc_xive_select_target()
1074 pr_devel(" found on 0x%x/%d\n", *server, prio); in kvmppc_xive_select_target()
1088 struct xive_irq_data *xd; in xive_lock_and_mask() local
1113 kvmppc_xive_select_irq(state, &hw_num, &xd); in xive_lock_and_mask()
1116 val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10); in xive_lock_and_mask()
1148 struct xive_irq_data *xd; in xive_finish_unmask() local
1156 kvmppc_xive_select_irq(state, &hw_num, &xd); in xive_finish_unmask()
1160 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11); in xive_finish_unmask()
1168 xive_vm_source_eoi(hw_num, xd); in xive_finish_unmask()
1275 int rc = 0; in kvmppc_xive_set_xive()
1281 pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n", in kvmppc_xive_set_xive()
1337 * we have a valid new priority (new_act_prio is not 0xff) in kvmppc_xive_set_xive()
1389 return 0; in kvmppc_xive_get_xive()
1407 pr_devel("int_on(irq=0x%x)\n", irq); in kvmppc_xive_int_on()
1417 /* If saved_priority is 0xff, do nothing */ in kvmppc_xive_int_on()
1419 return 0; in kvmppc_xive_int_on()
1428 return 0; in kvmppc_xive_int_on()
1446 pr_devel("int_off(irq=0x%x)\n", irq); in kvmppc_xive_int_off()
1454 return 0; in kvmppc_xive_int_off()
1484 return 0; in kvmppc_xive_get_icp()
1489 (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT; in kvmppc_xive_get_icp()
1508 pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n", in kvmppc_xive_set_icp()
1524 * Update MFRR state. If it's not 0xff, we mark the VCPU as in kvmppc_xive_set_icp()
1548 return 0; in kvmppc_xive_set_icp()
1567 pr_debug("%s: GIRQ 0x%lx host IRQ %ld XIVE HW IRQ 0x%x\n", in kvmppc_xive_set_mapped()
1620 * mask the interrupt in a lossy way (act_priority is 0xff) in kvmppc_xive_set_mapped()
1645 return 0; in kvmppc_xive_set_mapped()
1662 pr_debug("%s: GIRQ 0x%lx host IRQ %ld\n", __func__, guest_irq, host_irq); in kvmppc_xive_clr_mapped()
1694 state->pt_number = 0; in kvmppc_xive_clr_mapped()
1725 return 0; in kvmppc_xive_clr_mapped()
1736 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_disable_vcpu_interrupts()
1741 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) { in kvmppc_xive_disable_vcpu_interrupts()
1755 xive_native_configure_irq(state->ipi_number, 0, MASKED, 0); in kvmppc_xive_disable_vcpu_interrupts()
1758 xive_native_configure_irq(state->pt_number, 0, MASKED, 0); in kvmppc_xive_disable_vcpu_interrupts()
1776 vcpu->arch.xive_esc_vaddr = 0; in kvmppc_xive_disable_vcpu_interrupts()
1777 vcpu->arch.xive_esc_raddr = 0; in kvmppc_xive_disable_vcpu_interrupts()
1792 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_cleanup_single_escalation() local
1799 xd->stale_p = false; in xive_cleanup_single_escalation()
1802 xd->stale_p = true; in xive_cleanup_single_escalation()
1827 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { in kvmppc_xive_cleanup_vcpu()
1842 vcpu->arch.xive_cam_word = 0; in kvmppc_xive_cleanup_vcpu()
1845 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { in kvmppc_xive_cleanup_vcpu()
1902 return 0; in kvmppc_xive_compute_vp_id()
1942 xc->mfrr = 0xff; in kvmppc_xive_connect_vcpu()
1956 vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000); in kvmppc_xive_connect_vcpu()
1966 pr_devel(" IPI=0x%x\n", xc->vp_ipi); in kvmppc_xive_connect_vcpu()
1984 * and we enable escalation for queue 0 only which we'll use for in kvmppc_xive_connect_vcpu()
1989 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { in kvmppc_xive_connect_vcpu()
1999 if (r == 0 && !kvmppc_xive_has_single_escalation(xive)) in kvmppc_xive_connect_vcpu()
2006 q, i, NULL, 0, true); in kvmppc_xive_connect_vcpu()
2015 /* If not done above, attach priority 0 escalation */ in kvmppc_xive_connect_vcpu()
2016 r = kvmppc_xive_attach_escalation(vcpu, 0, kvmppc_xive_has_single_escalation(xive)); in kvmppc_xive_connect_vcpu()
2021 r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI); in kvmppc_xive_connect_vcpu()
2033 return 0; in kvmppc_xive_connect_vcpu()
2053 pr_err("invalid irq 0x%x in cpu queue!\n", irq); in xive_pre_save_set_queued()
2063 pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq); in xive_pre_save_set_queued()
2136 for (i = 0; i <= xive->max_sbid; i++) { in xive_pre_save_scan()
2140 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) in xive_pre_save_scan()
2149 for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) { in xive_pre_save_scan()
2156 for (i = 0; i <= xive->max_sbid; i++) { in xive_pre_save_scan()
2160 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) in xive_pre_save_scan()
2170 for (i = 0; i <= xive->max_sbid; i++) { in xive_post_save_scan()
2174 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) in xive_post_save_scan()
2179 xive->saved_src_count = 0; in xive_post_save_scan()
2220 if (xive->saved_src_count == 0) in xive_get_source()
2265 return 0; in xive_get_source()
2289 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { in kvmppc_xive_create_src_block()
2291 sb->irq_state[i].eisn = 0; in kvmppc_xive_create_src_block()
2320 xc->delayed_irq = 0; in xive_check_delayed_irq()
2337 int rc = 0; in xive_set_source()
2342 pr_devel("set_source(irq=0x%lx)\n", irq); in xive_set_source()
2365 pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n", in xive_set_source()
2374 if (state->ipi_number == 0) { in xive_set_source()
2379 pr_devel(" src_ipi=0x%x\n", state->ipi_number); in xive_set_source()
2387 * 0 before calling it to ensure it actually performs the masking. in xive_set_source()
2389 state->guest_priority = 0; in xive_set_source()
2418 if (rc == 0) in xive_set_source()
2488 return 0; in xive_set_source()
2517 else if (level == 0 || level == KVM_INTERRUPT_UNSET) { in kvmppc_xive_set_irq()
2519 return 0; in kvmppc_xive_set_irq()
2525 return 0; in kvmppc_xive_set_irq()
2532 int rc = 0; in kvmppc_xive_set_nr_servers()
2601 return 0; in xive_has_attr()
2606 return 0; in xive_has_attr()
2612 static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd) in kvmppc_xive_cleanup_irq() argument
2614 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01); in kvmppc_xive_cleanup_irq()
2615 xive_native_configure_irq(hw_num, 0, MASKED, 0); in kvmppc_xive_cleanup_irq()
2622 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { in kvmppc_xive_free_sources()
2689 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_release()
2729 memset(xive, 0, sizeof(*xive)); in kvmppc_xive_get_device()
2761 xive->q_page_order = 0; in kvmppc_xive_create()
2779 return 0; in kvmppc_xive_create()
2817 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { in kvmppc_xive_debug_show_queues()
2835 struct xive_irq_data *xd = in kvmppc_xive_debug_show_queues() local
2837 u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET); in kvmppc_xive_debug_show_queues()
2843 xd->eoi_page); in kvmppc_xive_debug_show_queues()
2847 return 0; in kvmppc_xive_debug_show_queues()
2856 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { in kvmppc_xive_debug_show_sources()
2858 struct xive_irq_data *xd; in kvmppc_xive_debug_show_sources() local
2865 kvmppc_xive_select_irq(state, &hw_num, &xd); in kvmppc_xive_debug_show_sources()
2867 pq = xive_vm_esb_load(xd, XIVE_ESB_GET); in kvmppc_xive_debug_show_sources()
2870 xd->src_chip); in kvmppc_xive_debug_show_sources()
2892 u64 t_rm_h_xirr = 0; in xive_debug_show()
2893 u64 t_rm_h_ipoll = 0; in xive_debug_show()
2894 u64 t_rm_h_cppr = 0; in xive_debug_show()
2895 u64 t_rm_h_eoi = 0; in xive_debug_show()
2896 u64 t_rm_h_ipi = 0; in xive_debug_show()
2897 u64 t_vm_h_xirr = 0; in xive_debug_show()
2898 u64 t_vm_h_ipoll = 0; in xive_debug_show()
2899 u64 t_vm_h_cppr = 0; in xive_debug_show()
2900 u64 t_vm_h_eoi = 0; in xive_debug_show()
2901 u64 t_vm_h_ipi = 0; in xive_debug_show()
2905 return 0; in xive_debug_show()
2945 for (i = 0; i <= xive->max_sbid; i++) { in xive_debug_show()
2955 return 0; in xive_debug_show()