Lines matching references to `xd` (a `struct xive_irq_data *`) in the XIVE interrupt controller core code:

194 static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)  in xive_esb_read()  argument
199 if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG) in xive_esb_read()
202 if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) in xive_esb_read()
203 val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0); in xive_esb_read()
205 val = in_be64(xd->eoi_mmio + offset); in xive_esb_read()
210 static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data) in xive_esb_write() argument
213 if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG) in xive_esb_write()
216 if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) in xive_esb_write()
217 xive_ops->esb_rw(xd->hw_irq, offset, data, 1); in xive_esb_write()
219 out_be64(xd->eoi_mmio + offset, data); in xive_esb_write()
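The two accessors above are the low-level ESB (Event State Buffer) load/store helpers. Below is a minimal sketch of how the fragments plausibly fit together; the errata adjustment after the SHIFT_BUG check and the else/return structure are assumptions for the lines the listing elides.

```c
/* Sketch of the ESB accessors reconstructed from the listing above.
 * The SHIFT_BUG offset adjustment and the else branches are assumptions.
 */
static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	/* Hardware erratum workaround: shift the ESB offset (assumed) */
	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	/* Either go through the hypervisor/firmware ESB accessor ... */
	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
	else
		/* ... or do a direct big-endian MMIO load from the ESB page */
		val = in_be64(xd->eoi_mmio + offset);

	return (u8)val;
}

static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
{
	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
	else
		out_be64(xd->eoi_mmio + offset, data);
}
```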
312 void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd) in xive_do_source_eoi() argument
315 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) in xive_do_source_eoi()
316 xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0); in xive_do_source_eoi()
317 else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) { in xive_do_source_eoi()
344 if (xd->flags & XIVE_IRQ_FLAG_LSI) in xive_do_source_eoi()
345 xive_esb_read(xd, XIVE_ESB_LOAD_EOI); in xive_do_source_eoi()
347 eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00); in xive_do_source_eoi()
351 if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio) in xive_do_source_eoi()
352 out_be64(xd->trig_mmio, 0); in xive_do_source_eoi()
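The references above show three EOI strategies: a single "store EOI" write, a firmware-assisted EOI, and the plain ESB path (load EOI for LSIs, PQ=00 plus optional retrigger for MSIs). A hedged sketch of that control flow; the firmware branch body (`xive_ops->eoi()`) and the exact else layout are assumptions.

```c
/* Sketch: how the listing's EOI paths plausibly fit together */
static void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
{
	/* Preferred: one "store EOI" write clears P and Q in a single shot */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
		/* Some sources need a firmware call to complete the EOI (assumed) */
		xive_ops->eoi(hw_irq);
	} else {
		u8 eoi_val;

		/* LSIs use the special "load EOI" cycle */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
		else {
			/* MSIs: clear PQ and inspect the old Q bit */
			eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);

			/* A set Q bit means an event was coalesced: re-trigger it */
			if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
				out_be64(xd->trig_mmio, 0);
		}
	}
}
```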
360 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_eoi() local
371 !(xd->flags & XIVE_IRQ_NO_EOI)) in xive_irq_eoi()
372 xive_do_source_eoi(irqd_to_hwirq(d), xd); in xive_irq_eoi()
378 xd->saved_p = false; in xive_irq_eoi()
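The irq_chip `->irq_eoi` callback only touches `xd` in the two places above: a guarded source EOI and the clearing of `saved_p`. A sketch with the other guard conditions filled in as assumptions (the disabled and forwarded-to-vcpu checks are not visible in the listing).

```c
/* Sketch; guards other than XIVE_IRQ_NO_EOI are assumptions */
static void xive_irq_eoi(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/* EOI the source unless it is disabled, handed to a KVM guest,
	 * or flagged as not needing a source EOI */
	if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
	    !(xd->flags & XIVE_IRQ_NO_EOI))
		xive_do_source_eoi(irqd_to_hwirq(d), xd);

	/* The interrupt no longer occupies a slot in the target queue */
	xd->saved_p = false;

	/* (the per-CPU queue EOI that follows in the full function is elided) */
}
```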
389 static void xive_do_source_set_mask(struct xive_irq_data *xd, in xive_do_source_set_mask() argument
403 val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01); in xive_do_source_set_mask()
404 xd->saved_p = !!(val & XIVE_ESB_VAL_P); in xive_do_source_set_mask()
405 } else if (xd->saved_p) in xive_do_source_set_mask()
406 xive_esb_read(xd, XIVE_ESB_SET_PQ_10); in xive_do_source_set_mask()
408 xive_esb_read(xd, XIVE_ESB_SET_PQ_00); in xive_do_source_set_mask()
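This helper is the ESB mask/unmask state machine. A sketch assuming the second parameter is a `bool mask` and the if/else chain follows the listing.

```c
/* Sketch of the mask/unmask transitions on the PQ bits */
static void xive_do_source_set_mask(struct xive_irq_data *xd, bool mask)
{
	u64 val;

	if (mask) {
		/* PQ=01 masks the source; remember whether P was set so the
		 * source is not fully re-enabled while an occurrence may
		 * still sit in a queue */
		val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
		xd->saved_p = !!(val & XIVE_ESB_VAL_P);
	} else if (xd->saved_p)
		/* Restore P (PQ=10): stay pending until the queued
		 * occurrence has been EOId */
		xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
	else
		/* Fully re-enable the source: PQ=00 */
		xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
}
```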
512 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_pick_irq_target() local
520 if (xd->src_chip != XIVE_INVALID_CHIP_ID && in xive_pick_irq_target()
525 if (xc->chip_id == xd->src_chip) in xive_pick_irq_target()
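Only three references here, but they show the targeting policy: if the source advertises a chip ID, prefer CPUs sitting on that chip. A very rough sketch; the per-CPU `xive_cpu` lookup, the cpumask plumbing, and the fallback are assumptions, and the real code balances across the matching set rather than taking the first hit.

```c
/* Rough sketch of the chip-affinity preference; heavily simplified */
static int xive_pick_irq_target(struct irq_data *d,
				const struct cpumask *affinity)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	int cpu;

	if (xd->src_chip != XIVE_INVALID_CHIP_ID) {
		/* Prefer an online CPU in the affinity mask that lives on
		 * the same chip as the interrupt source */
		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
			struct xive_cpu *xc = per_cpu(xive_cpu, cpu); /* assumed */

			if (xc->chip_id == xd->src_chip)
				return cpu; /* simplified: real code round-robins */
		}
	}

	/* Otherwise fall back to any CPU in the affinity mask (elided) */
	return -1;
}
```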
545 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_startup() local
577 xd->target = target; in xive_irq_startup()
590 xive_do_source_set_mask(xd, false); in xive_irq_startup()
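Startup picks a target CPU, records it in `xd->target`, and finishes by unmasking the source. A condensed sketch; the target-selection call, the backend `configure_irq` routing call, and the priority constant are assumptions for the elided lines.

```c
/* Condensed sketch of ->irq_startup(); most error handling elided */
static unsigned int xive_irq_startup(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int target, rc;

	/* Pick a target CPU honouring the affinity mask (assumed helper) */
	target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
	if (target == XIVE_INVALID_TARGET)
		return -ENXIO;

	xd->target = target;

	/* Route the HW interrupt to the chosen CPU's queue (assumed call) */
	rc = xive_ops->configure_irq(hw_irq,
				     get_hard_smp_processor_id(target),
				     xive_irq_priority, d->irq);
	if (rc)
		return rc;

	/* Finally unmask the source at the ESB level */
	xive_do_source_set_mask(xd, false);

	return 0;
}
```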
597 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_shutdown() local
603 if (WARN_ON(xd->target == XIVE_INVALID_TARGET)) in xive_irq_shutdown()
607 xive_do_source_set_mask(xd, true); in xive_irq_shutdown()
617 xd->saved_p = false; in xive_irq_shutdown()
624 get_hard_smp_processor_id(xd->target), in xive_irq_shutdown()
627 xive_dec_target_count(xd->target); in xive_irq_shutdown()
628 xd->target = XIVE_INVALID_TARGET; in xive_irq_shutdown()
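Shutdown is the mirror image: mask at the source, drop the saved P state, detach the hardware entry, and release the target accounting. A sketch; the backend `configure_irq` masking call and its arguments are assumptions.

```c
/* Condensed sketch of ->irq_shutdown() */
static void xive_irq_shutdown(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
		return;

	/* Mask the source (PQ=01) */
	xive_do_source_set_mask(xd, true);

	/* Forget the P state captured by the masking above so a later
	 * startup does not think the interrupt is still queued */
	xd->saved_p = false;

	/* Point the HW entry away from the CPU queue (assumed call/args) */
	xive_ops->configure_irq(hw_irq,
				get_hard_smp_processor_id(xd->target),
				0xff, hw_irq);

	/* Release the per-target accounting and invalidate the target */
	xive_dec_target_count(xd->target);
	xd->target = XIVE_INVALID_TARGET;
}
```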
633 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_unmask() local
635 pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd); in xive_irq_unmask()
643 if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) { in xive_irq_unmask()
646 get_hard_smp_processor_id(xd->target), in xive_irq_unmask()
651 xive_do_source_set_mask(xd, false); in xive_irq_unmask()
656 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_mask() local
658 pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd); in xive_irq_mask()
666 if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) { in xive_irq_mask()
669 get_hard_smp_processor_id(xd->target), in xive_irq_mask()
674 xive_do_source_set_mask(xd, true); in xive_irq_mask()
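xive_irq_unmask() and xive_irq_mask() are symmetric: both special-case sources flagged XIVE_IRQ_FLAG_MASK_FW, which need a firmware reconfiguration rather than an ESB write, and otherwise defer to xive_do_source_set_mask(). A sketch of the pair; the bodies of the firmware branches are assumptions.

```c
/* Sketch of the mask/unmask pair; the MASK_FW branch bodies are assumed */
static void xive_irq_unmask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);

	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
		/* Firmware-managed source: re-route it to the target with
		 * the normal priority instead of touching the ESB (assumed) */
		xive_ops->configure_irq((unsigned int)irqd_to_hwirq(d),
					get_hard_smp_processor_id(xd->target),
					xive_irq_priority, d->irq);
		return;
	}

	xive_do_source_set_mask(xd, false);
}

static void xive_irq_mask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);

	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
		/* Firmware-managed source: mask by reconfiguring with an
		 * "off" priority (assumed) */
		xive_ops->configure_irq((unsigned int)irqd_to_hwirq(d),
					get_hard_smp_processor_id(xd->target),
					0xff, d->irq);
		return;
	}

	xive_do_source_set_mask(xd, true);
}
```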
681 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_set_affinity() local
700 if (xd->target != XIVE_INVALID_TARGET && in xive_irq_set_affinity()
701 cpu_online(xd->target) && in xive_irq_set_affinity()
702 cpumask_test_cpu(xd->target, cpumask)) in xive_irq_set_affinity()
716 old_target = xd->target; in xive_irq_set_affinity()
732 xd->target = target; in xive_irq_set_affinity()
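The affinity references show the fast path (keep the current target if it is still online and inside the new mask) and the slow path (pick a new target and record it). A condensed sketch; the validation, the reconfiguration call, and the accounting of the old target are assumptions for the elided lines.

```c
/* Condensed sketch of ->irq_set_affinity() */
static int xive_irq_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask, bool force)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	u32 target, old_target;
	int rc;

	/* Fast path: the current target still fits the new mask */
	if (xd->target != XIVE_INVALID_TARGET &&
	    cpu_online(xd->target) &&
	    cpumask_test_cpu(xd->target, cpumask))
		return IRQ_SET_MASK_OK;

	/* Pick a new target from the requested mask (assumed helper) */
	target = xive_pick_irq_target(d, cpumask);
	if (target == XIVE_INVALID_TARGET)
		return -ENXIO;

	old_target = xd->target;

	/* Re-route the HW interrupt to the new CPU (assumed call) */
	rc = xive_ops->configure_irq((unsigned int)irqd_to_hwirq(d),
				     get_hard_smp_processor_id(target),
				     xive_irq_priority, d->irq);
	if (rc < 0)
		return rc;

	xd->target = target;

	/* Drop the accounting on the previous target (assumed) */
	if (old_target != XIVE_INVALID_TARGET)
		xive_dec_target_count(old_target);

	return IRQ_SET_MASK_OK;
}
```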
743 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_set_type() local
770 !!(xd->flags & XIVE_IRQ_FLAG_LSI)) { in xive_irq_set_type()
774 (xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge"); in xive_irq_set_type()
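Here `xd` is only used to cross-check the requested trigger type against the XIVE_IRQ_FLAG_LSI flag reported by firmware. A sketch; the accepted type set and the exact warning format are assumptions.

```c
/* Sketch: warn when Linux's trigger type disagrees with the firmware flag */
static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/* (normalisation/validation of flow_type elided) */

	/* Level-low in Linux should correspond to an LSI in firmware */
	if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
	    !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
		pr_warn("Interrupt %d type mismatch, Linux says %s, FW says %s\n",
			d->irq,
			(flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
			(xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
	}

	return IRQ_SET_TYPE_MASKED;
}
```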
782 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_retrigger() local
785 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI)) in xive_irq_retrigger()
792 xive_esb_read(xd, XIVE_ESB_SET_PQ_11); in xive_irq_retrigger()
800 xive_do_source_eoi(0, xd); in xive_irq_retrigger()
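Retrigger is MSI-only: force the PQ bits to 11, then EOI, which makes the ESB send the event again. Passing 0 as hw_irq keeps the EOI on the ESB path rather than any firmware path. A sketch of that sequence.

```c
/* Sketch of the retrigger sequence visible above */
static int xive_irq_retrigger(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/* Retrigger only makes sense for MSIs, not level sources */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return 0;

	/* Force PQ=11 so the following EOI sees a pending event ... */
	xive_esb_read(xd, XIVE_ESB_SET_PQ_11);

	/* ... then EOI with hw_irq=0 to skip the firmware EOI branch;
	 * this re-sends the interrupt */
	xive_do_source_eoi(0, xd);

	return 1;
}
```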
807 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_set_vcpu_affinity() local
816 if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) in xive_irq_set_vcpu_affinity()
827 pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10); in xive_irq_set_vcpu_affinity()
830 if (xd->target == XIVE_INVALID_TARGET) { in xive_irq_set_vcpu_affinity()
856 pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_11); in xive_irq_set_vcpu_affinity()
857 xd->saved_p = true; in xive_irq_set_vcpu_affinity()
871 xd->saved_p = false; in xive_irq_set_vcpu_affinity()
876 if (xd->target == XIVE_INVALID_TARGET) { in xive_irq_set_vcpu_affinity()
877 xive_do_source_set_mask(xd, true); in xive_irq_set_vcpu_affinity()
898 get_hard_smp_processor_id(xd->target), in xive_irq_set_vcpu_affinity()
915 if (!xd->saved_p) in xive_irq_set_vcpu_affinity()
916 xive_do_source_eoi(hw_irq, xd); in xive_irq_set_vcpu_affinity()
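This is the KVM pass-through hook: when a guest takes over the source, the host parks the ESB in PQ=10/11 and remembers P in xd->saved_p; when the source is handed back, the host restores its own routing and resends if needed. A heavily condensed sketch; everything beyond the `xd` manipulations shown above (the meaning of the state argument, the sync and reconfiguration calls, error handling) is an assumption.

```c
/* Heavily condensed sketch of the pass-through hand-over/hand-back */
static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	u8 pq;

	/* Firmware-masked sources cannot be handed to a guest */
	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW)
		return -EIO;

	if (state) {		/* hand the source over to the guest (assumed) */
		/* Stop further host deliveries and capture the old P bit */
		pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);

		if (xd->target == XIVE_INVALID_TARGET)
			return 0;	/* nothing routed, nothing to do */

		if (pq & XIVE_ESB_VAL_P) {
			/* A host occurrence is still queued: keep it pending
			 * and remember it so it is not lost */
			pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
			xd->saved_p = true;
		} else
			xd->saved_p = false;
	} else {		/* hand the source back to the host */
		if (xd->target == XIVE_INVALID_TARGET) {
			xive_do_source_set_mask(xd, true);
			return 0;
		}

		/* Re-route to the host target (assumed call) */
		xive_ops->configure_irq(hw_irq,
					get_hard_smp_processor_id(xd->target),
					xive_irq_priority, d->irq);

		/* If no host occurrence was saved, EOI to resend anything
		 * that fired while the guest owned the source */
		if (!xd->saved_p)
			xive_do_source_eoi(hw_irq, xd);
	}
	return 0;
}
```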
941 void xive_cleanup_irq_data(struct xive_irq_data *xd) in xive_cleanup_irq_data() argument
943 if (xd->eoi_mmio) { in xive_cleanup_irq_data()
944 iounmap(xd->eoi_mmio); in xive_cleanup_irq_data()
945 if (xd->eoi_mmio == xd->trig_mmio) in xive_cleanup_irq_data()
946 xd->trig_mmio = NULL; in xive_cleanup_irq_data()
947 xd->eoi_mmio = NULL; in xive_cleanup_irq_data()
949 if (xd->trig_mmio) { in xive_cleanup_irq_data()
950 iounmap(xd->trig_mmio); in xive_cleanup_irq_data()
951 xd->trig_mmio = NULL; in xive_cleanup_irq_data()
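These lines are essentially the whole teardown helper: the EOI and trigger pages may share a single mapping, so the shared case must only be unmapped once. A sketch restating them in context.

```c
/* The EOI and trigger MMIO pages can be the same mapping; unmap it once */
void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
	if (xd->eoi_mmio) {
		iounmap(xd->eoi_mmio);
		if (xd->eoi_mmio == xd->trig_mmio)
			xd->trig_mmio = NULL;
		xd->eoi_mmio = NULL;
	}
	if (xd->trig_mmio) {
		iounmap(xd->trig_mmio);
		xd->trig_mmio = NULL;
	}
}
```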
958 struct xive_irq_data *xd; in xive_irq_alloc_data() local
961 xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL); in xive_irq_alloc_data()
962 if (!xd) in xive_irq_alloc_data()
964 rc = xive_ops->populate_irq_data(hw, xd); in xive_irq_alloc_data()
966 kfree(xd); in xive_irq_alloc_data()
969 xd->target = XIVE_INVALID_TARGET; in xive_irq_alloc_data()
970 irq_set_handler_data(virq, xd); in xive_irq_alloc_data()
977 struct xive_irq_data *xd = irq_get_handler_data(virq); in xive_irq_free_data() local
979 if (!xd) in xive_irq_free_data()
982 xive_cleanup_irq_data(xd); in xive_irq_free_data()
983 kfree(xd); in xive_irq_free_data()
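Allocation and teardown of the per-interrupt data are straightforward. The sketch below fills in the elided error returns and the handler-data reset as assumptions; the function signatures are also assumptions, since the listing only shows the `xd` handling inside the bodies.

```c
/* Sketch of the xd lifecycle; signatures and error codes are assumptions */
static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
{
	struct xive_irq_data *xd;
	int rc;

	xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
	if (!xd)
		return -ENOMEM;

	/* Let the backend (native or hypervisor) fill in the MMIO/flags */
	rc = xive_ops->populate_irq_data(hw, xd);
	if (rc) {
		kfree(xd);
		return rc;
	}

	/* Not routed anywhere yet */
	xd->target = XIVE_INVALID_TARGET;
	irq_set_handler_data(virq, xd);

	return 0;
}

static void xive_irq_free_data(unsigned int virq)
{
	struct xive_irq_data *xd = irq_get_handler_data(virq);

	if (!xd)
		return;

	irq_set_handler_data(virq, NULL);	/* assumed */
	xive_cleanup_irq_data(xd);
	kfree(xd);
}
```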
991 struct xive_irq_data *xd; in xive_cause_ipi() local
998 xd = &xc->ipi_data; in xive_cause_ipi()
999 if (WARN_ON(!xd->trig_mmio)) in xive_cause_ipi()
1001 out_be64(xd->trig_mmio, 0); in xive_cause_ipi()
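IPIs reuse the same xive_irq_data machinery: each CPU has an `ipi_data` whose trigger page is written to poke the target. A sketch; the per-CPU `xive_cpu` lookup is an assumption.

```c
/* Sketch: raising an IPI is a single store to the target's trigger page */
static void xive_cause_ipi(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);	/* assumed lookup */
	struct xive_irq_data *xd = &xc->ipi_data;

	if (WARN_ON(!xd->trig_mmio))
		return;

	out_be64(xd->trig_mmio, 0);
}
```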
1322 struct xive_irq_data *xd; in xive_flush_cpu_queue() local
1342 xd = irq_desc_get_handler_data(desc); in xive_flush_cpu_queue()
1348 if (xd->flags & XIVE_IRQ_FLAG_LSI) in xive_flush_cpu_queue()
1349 xive_do_source_eoi(irqd_to_hwirq(d), xd); in xive_flush_cpu_queue()
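Finally, when a CPU's queue is flushed, each interrupt found in it must be made to fire again on its new target: level sources get an EOI (which re-sends if still asserted), while edge sources presumably go through the retrigger path. A fragmentary sketch of just that decision, wrapped in a hypothetical helper; the queue scan, the desc lookups, the helper name itself, and the edge-source branch are not from the listing.

```c
/* Fragment: re-sending one interrupt pulled out of a dead CPU's queue.
 * xive_flush_cpu_queue_one() is a hypothetical wrapper for illustration.
 */
static void xive_flush_cpu_queue_one(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct xive_irq_data *xd = irq_desc_get_handler_data(desc);

	if (xd->flags & XIVE_IRQ_FLAG_LSI)
		/* Level source: EOI re-sends it if it is still asserted */
		xive_do_source_eoi(irqd_to_hwirq(d), xd);
	else
		/* Edge source: force a retrigger instead (assumed) */
		xive_irq_retrigger(d);
}
```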