Lines matching "0" and "xc" (whole-word match)

43 #define DBG_VERBOSE(fmt...)	do { } while(0)
99 * or 0 if there is no new entry.
108 return 0; in xive_read_eq()
113 return 0; in xive_read_eq()
121 if (q->idx == 0) in xive_read_eq()
125 return cur & 0x7fffffff; in xive_read_eq()
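The xive_read_eq() lines above implement a generation-bit ring: bit 31 of each event-queue entry flips polarity every time the producer wraps, so the consumer compares it against its own toggle instead of tracking a producer index. A minimal user-space model of that read path (the hypothetical struct eq stands in for struct xive_q):

	#include <stdint.h>
	#include <stdio.h>

	#define EQ_SHIFT	3
	#define EQ_MSK		((1u << EQ_SHIFT) - 1)

	struct eq {				/* stand-in for struct xive_q */
		uint32_t page[1u << EQ_SHIFT];
		uint32_t idx;
		uint8_t  toggle;
	};

	/* Return the entry data (bit 31 masked off), or 0 if the queue is empty. */
	static uint32_t eq_read(struct eq *q, int just_peek)
	{
		uint32_t cur = q->page[q->idx];

		/* An entry is valid only if its bit 31 differs from our toggle. */
		if ((cur >> 31) == q->toggle)
			return 0;
		if (!just_peek) {
			q->idx = (q->idx + 1) & EQ_MSK;
			if (q->idx == 0)	/* wrapped: flip expected polarity */
				q->toggle ^= 1;
		}
		return cur & 0x7fffffff;
	}

	int main(void)
	{
		struct eq q = { .toggle = 0 };

		/* Producer side: the first generation is written with bit 31 set. */
		q.page[0] = 0x80000000u | 0x20;
		q.page[1] = 0x80000000u | 0x21;

		printf("0x%x\n", eq_read(&q, 1));	/* peek: 0x20, idx unchanged */
		printf("0x%x\n", eq_read(&q, 0));	/* consume: 0x20 */
		printf("0x%x\n", eq_read(&q, 0));	/* consume: 0x21 */
		printf("0x%x\n", eq_read(&q, 0));	/* empty: 0 */
		return 0;
	}
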
135 * (0xff if none) and return what was found (0 if none).
151 static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek) in xive_scan_interrupts() argument
153 u32 irq = 0; in xive_scan_interrupts()
154 u8 prio = 0; in xive_scan_interrupts()
157 while (xc->pending_prio != 0) { in xive_scan_interrupts()
160 prio = ffs(xc->pending_prio) - 1; in xive_scan_interrupts()
164 irq = xive_read_eq(&xc->queue[prio], just_peek); in xive_scan_interrupts()
182 xc->pending_prio &= ~(1 << prio); in xive_scan_interrupts()
189 q = &xc->queue[prio]; in xive_scan_interrupts()
191 int p = atomic_xchg(&q->pending_count, 0); in xive_scan_interrupts()
199 /* If nothing was found, set CPPR to 0xff */ in xive_scan_interrupts()
200 if (irq == 0) in xive_scan_interrupts()
201 prio = 0xff; in xive_scan_interrupts()
204 if (prio != xc->cppr) { in xive_scan_interrupts()
206 xc->cppr = prio; in xive_scan_interrupts()
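xive_scan_interrupts() as excerpted above keeps pending priorities in a one-byte bitmap: ffs() picks the most-favoured (lowest-numbered) priority, the matching queue is read, a drained queue clears its pending bit, and if nothing is found the CPPR is raised to 0xff. A hedged user-space model of the same loop, with read_eq() stubbed:

	#include <stdint.h>
	#include <stdio.h>
	#include <strings.h>			/* ffs() */

	/* Stub: one fake event on priority 1, then empty. */
	static uint32_t read_eq(int prio)
	{
		static uint32_t fake[8] = { [1] = 0x20 };
		uint32_t irq = fake[prio];

		fake[prio] = 0;
		return irq;
	}

	static uint32_t scan(uint8_t *pending, uint8_t *cppr)
	{
		uint32_t irq = 0;
		int prio = 0xff;

		while (*pending) {
			prio = ffs(*pending) - 1;	/* most favoured first */
			irq = read_eq(prio);
			if (irq)
				break;
			*pending &= ~(1u << prio);	/* queue is drained */
			prio = 0xff;
		}
		/* Found: mask equal/less favoured sources. Nothing: open up fully. */
		*cppr = irq ? prio : 0xff;
		return irq;
	}

	int main(void)
	{
		uint8_t pending = 1u << 1, cppr = 0xff;
		uint32_t irq = scan(&pending, &cppr);

		printf("irq=0x%x cppr=0x%02x pending=0x%02x\n", irq, cppr, pending);
		return 0;
	}
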
225 val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0); in xive_esb_read()
245 snprintf(buffer, size, "flags=%c%c%c PQ=%c%c 0x%016llx 0x%016llx", in xive_irq_data_dump()
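The PQ=%c%c pair in that dump is decoded from the two low bits an ESB load returns. A small sketch of the decode, using the bit values P=0x2 and Q=0x1 as defined in arch/powerpc/include/asm/xive-regs.h:

	#include <stdint.h>
	#include <stdio.h>

	#define XIVE_ESB_VAL_P	0x2	/* event notified, EOI outstanding */
	#define XIVE_ESB_VAL_Q	0x1	/* a further trigger was coalesced */

	static void dump_pq(uint64_t val)
	{
		printf("PQ=%c%c\n",
		       val & XIVE_ESB_VAL_P ? 'P' : '-',
		       val & XIVE_ESB_VAL_Q ? 'Q' : '-');
	}

	int main(void)
	{
		dump_pq(0x2);	/* "PQ=P-": pending */
		dump_pq(0x3);	/* "PQ=PQ": pending with a queued trigger */
		return 0;
	}
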
272 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); in xmon_xive_do_dump() local
275 if (xc) { in xmon_xive_do_dump()
276 xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr); in xmon_xive_do_dump()
282 xive_irq_data_dump(&xc->ipi_data, buffer, sizeof(buffer)); in xmon_xive_do_dump()
283 xmon_printf("IPI=0x%08x %s", xc->hw_ipi, buffer); in xmon_xive_do_dump()
286 xive_dump_eq("EQ", &xc->queue[xive_irq_priority]); in xmon_xive_do_dump()
307 xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc); in xmon_xive_get_irq_config()
311 xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ", in xmon_xive_get_irq_config()
326 return 0; in xmon_xive_get_irq_config()
346 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_get_irq() local
363 xive_ops->update_pending(xc); in xive_get_irq()
365 DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio); in xive_get_irq()
368 irq = xive_scan_interrupts(xc, false); in xive_get_irq()
370 DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n", in xive_get_irq()
371 irq, xc->pending_prio); in xive_get_irq()
375 return 0; in xive_get_irq()
389 static void xive_do_queue_eoi(struct xive_cpu *xc) in xive_do_queue_eoi() argument
391 if (xive_scan_interrupts(xc, true) != 0) { in xive_do_queue_eoi()
392 DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio); in xive_do_queue_eoi()
409 xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0); in xive_do_source_eoi()
434 out_be64(xd->trig_mmio, 0); in xive_do_source_eoi()
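These two lines are the two EOI flavours in xive_do_source_eoi(): a Store EOI is a single store to the ESB, while the load-based fallback resets PQ and, if Q was set, replays the coalesced trigger through the trigger page. A sketch of that decision over a software-modelled ESB (the F_* flag names are stand-ins for the XIVE_IRQ_FLAG_* bits):

	#include <stdint.h>
	#include <stdio.h>

	#define ESB_VAL_Q	0x1
	#define F_STORE_EOI	0x1	/* stand-in for XIVE_IRQ_FLAG_STORE_EOI */
	#define F_LSI		0x2	/* stand-in for XIVE_IRQ_FLAG_LSI */

	struct src {
		uint64_t flags;
		uint8_t  pq;		/* modelled ESB state */
		int	 retriggered;
	};

	static uint8_t load_eoi(struct src *s)	/* a load-EOI resets PQ to 00 */
	{
		uint8_t old = s->pq;

		s->pq = 0;
		return old;
	}

	static void source_eoi(struct src *s)
	{
		if (s->flags & F_STORE_EOI) {
			s->pq = 0;	/* xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0) */
		} else if (s->flags & F_LSI) {
			load_eoi(s);	/* LSIs: the load alone suffices */
		} else if (load_eoi(s) & ESB_VAL_Q) {
			/* A trigger was coalesced while pending: replay it,
			 * i.e. out_be64(xd->trig_mmio, 0) in the real code. */
			s->retriggered = 1;
		}
	}

	int main(void)
	{
		struct src s = { .flags = 0, .pq = 0x3 };	/* P and Q set */

		source_eoi(&s);
		printf("pq=%x retriggered=%d\n", s.pq, s.retriggered);
		return 0;
	}
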
441 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_irq_eoi() local
443 DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n", in xive_irq_eoi()
444 d->irq, irqd_to_hwirq(d), xc->pending_prio); in xive_irq_eoi()
463 xive_do_queue_eoi(xc); in xive_irq_eoi()
474 pr_debug("%s: HW 0x%x %smask\n", __func__, xd->hw_irq, mask ? "" : "un"); in xive_do_source_set_mask()
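xive_do_source_set_mask() masks purely through the ESB: special load offsets set the PQ pair atomically, and PQ=01 is the "off" state in which new triggers are swallowed. A toy trigger state machine following the transitions QEMU implements for its XIVE model (hw/intc/xive.c):

	#include <stdio.h>

	enum pq { PQ_00 = 0, PQ_01 = 1, PQ_10 = 2, PQ_11 = 3 };

	/* Returns 1 if the trigger raises a notification to the presenter. */
	static int trigger(enum pq *state)
	{
		switch (*state) {
		case PQ_00:			/* idle: notify, go pending */
			*state = PQ_10;
			return 1;
		case PQ_10:			/* pending: coalesce into Q */
		case PQ_11:
			*state = PQ_11;
			return 0;
		case PQ_01:			/* off (masked): swallowed */
		default:
			return 0;
		}
	}

	int main(void)
	{
		enum pq s = PQ_01;	/* as set by XIVE_ESB_SET_PQ_01 on mask */

		printf("masked:   notified=%d\n", trigger(&s));
		s = PQ_00;		/* unmasked */
		printf("unmasked: notified=%d\n", trigger(&s));
		return 0;
	}
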
505 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); in xive_try_pick_target() local
506 struct xive_q *q = &xc->queue[xive_irq_priority]; in xive_try_pick_target()
529 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); in xive_dec_target_count() local
530 struct xive_q *q = &xc->queue[xive_irq_priority]; in xive_dec_target_count()
532 if (WARN_ON(cpu < 0 || !xc)) { in xive_dec_target_count()
533 pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc); in xive_dec_target_count()
559 for (i = 0; i < first && cpu < nr_cpu_ids; i++) in xive_find_target_in_mask()
610 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); in xive_pick_irq_target() local
611 if (xc->chip_id == xd->src_chip) in xive_pick_irq_target()
620 if (cpu >= 0) in xive_pick_irq_target()
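Target selection walks the affinity mask round-robin, preferring (per the chip_id test above) CPUs on the interrupt's source chip. A simplified model of xive_find_target_in_mask()'s rotation over a plain bitmask, with hypothetical names:

	#include <stdio.h>

	#define NR_CPUS 8

	static unsigned int rr_seed;	/* models the rotating starting point */

	/* Pick the rr_seed'th set CPU in the mask, wrapping; -1 if empty. */
	static int find_target_in_mask(unsigned int mask)
	{
		unsigned int num = __builtin_popcount(mask);
		unsigned int first, seen = 0;

		if (!num)
			return -1;
		first = rr_seed++ % num;
		for (int cpu = 0; cpu < NR_CPUS; cpu++) {
			if (!(mask & (1u << cpu)))
				continue;
			if (seen++ == first)
				return cpu;
		}
		return -1;
	}

	int main(void)
	{
		unsigned int mask = 0x0b;	/* CPUs 0, 1 and 3 */

		for (int n = 0; n < 4; n++)
			printf("pick=%d\n", find_target_in_mask(mask));
		return 0;
	}
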
638 pr_debug("%s: irq %d [0x%x] data @%p\n", __func__, d->irq, hw_irq, d); in xive_irq_startup()
670 return 0; in xive_irq_startup()
679 pr_debug("%s: irq %d [0x%x] data @%p\n", __func__, d->irq, hw_irq, d); in xive_irq_shutdown()
693 0xff, XIVE_BAD_IRQ); in xive_irq_shutdown()
724 int rc = 0; in xive_irq_set_affinity()
726 pr_debug("%s: irq %d/0x%x\n", __func__, d->irq, hw_irq); in xive_irq_set_affinity()
762 if (rc < 0) { in xive_irq_set_affinity()
767 pr_debug(" target: 0x%x\n", target); in xive_irq_set_affinity()
807 pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n", in xive_irq_set_type()
822 return 0; in xive_irq_retrigger()
867 return 0; in xive_irq_set_vcpu_affinity()
906 return 0; in xive_irq_set_vcpu_affinity()
947 return 0; in xive_irq_set_vcpu_affinity()
971 return 0; in xive_get_irqchip_state()
999 pr_debug("%s for HW 0x%x\n", __func__, xd->hw_irq); in xive_cleanup_irq_data()
1039 return 0; in xive_irq_alloc_data()
1058 struct xive_cpu *xc; in xive_cause_ipi() local
1061 xc = per_cpu(xive_cpu, cpu); in xive_cause_ipi()
1063 DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n", in xive_cause_ipi()
1064 smp_processor_id(), cpu, xc->hw_ipi); in xive_cause_ipi()
1066 xd = &xc->ipi_data; in xive_cause_ipi()
1069 out_be64(xd->trig_mmio, 0); in xive_cause_ipi()
1079 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_ipi_eoi() local
1082 if (!xc) in xive_ipi_eoi()
1085 DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n", in xive_ipi_eoi()
1086 d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio); in xive_ipi_eoi()
1088 xive_do_source_eoi(&xc->ipi_data); in xive_ipi_eoi()
1089 xive_do_queue_eoi(xc); in xive_ipi_eoi()
1121 for (i = 0; i < nr_irqs; i++) { in xive_ipi_irq_domain_alloc()
1126 return 0; in xive_ipi_irq_domain_alloc()
1163 if (ret < 0) in xive_init_ipis()
1188 return 0; in xive_request_ipi()
1194 WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret); in xive_request_ipi()
1201 struct xive_cpu *xc; in xive_setup_cpu_ipi() local
1206 xc = per_cpu(xive_cpu, cpu); in xive_setup_cpu_ipi()
1209 if (xc->hw_ipi != XIVE_BAD_IRQ) in xive_setup_cpu_ipi()
1210 return 0; in xive_setup_cpu_ipi()
1215 /* Grab an IPI from the backend, this will populate xc->hw_ipi */ in xive_setup_cpu_ipi()
1216 if (xive_ops->get_ipi(cpu, xc)) in xive_setup_cpu_ipi()
1223 rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data); in xive_setup_cpu_ipi()
1228 rc = xive_ops->configure_irq(xc->hw_ipi, in xive_setup_cpu_ipi()
1235 pr_debug("CPU %d HW IPI 0x%x, virq %d, trig_mmio=%p\n", cpu, in xive_setup_cpu_ipi()
1236 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio); in xive_setup_cpu_ipi()
1239 xive_do_source_set_mask(&xc->ipi_data, false); in xive_setup_cpu_ipi()
1241 return 0; in xive_setup_cpu_ipi()
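Lines 1201-1241 show the fixed IPI bring-up order: skip if the CPU already owns an IPI, allocate one from the backend, populate its ESB data, route it to this CPU at the single priority Linux uses, and only then unmask. A skeletal restatement of that ordering with stubbed backend calls (all names below are stand-ins, not the real xive_ops):

	#include <stdio.h>

	#define XIVE_BAD_IRQ	0xffffffff
	#define PRIO		7	/* stand-in for xive_irq_priority */

	/* Stubbed backend, standing in for the xive_ops callbacks. */
	static int get_ipi(unsigned int cpu, unsigned int *hw_ipi)
	{
		*hw_ipi = 0x10 + cpu;	/* pretend allocation succeeded */
		return 0;
	}
	static int populate_irq_data(unsigned int hw_ipi) { return 0; }
	static int configure_irq(unsigned int hw_ipi, unsigned int target,
				 unsigned char prio, unsigned int lirq) { return 0; }
	static void unmask_source(unsigned int hw_ipi) { }

	static int setup_cpu_ipi(unsigned int cpu, unsigned int *hw_ipi)
	{
		if (*hw_ipi != XIVE_BAD_IRQ)
			return 0;			/* already initialized */
		if (get_ipi(cpu, hw_ipi))
			return -1;
		if (populate_irq_data(*hw_ipi))
			return -1;
		if (configure_irq(*hw_ipi, cpu, PRIO, 0 /* ipi virq */))
			return -1;
		unmask_source(*hw_ipi);			/* only now allow triggers */
		return 0;
	}

	int main(void)
	{
		unsigned int hw_ipi = XIVE_BAD_IRQ;

		printf("rc=%d hw_ipi=0x%x\n", setup_cpu_ipi(1, &hw_ipi), hw_ipi);
		return 0;
	}
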
1244 noinstr static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc) in xive_cleanup_cpu_ipi() argument
1251 if (xc->hw_ipi == XIVE_BAD_IRQ) in xive_cleanup_cpu_ipi()
1257 xive_do_source_set_mask(&xc->ipi_data, true); in xive_cleanup_cpu_ipi()
1266 xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(), in xive_cleanup_cpu_ipi()
1267 0xff, xive_ipi_irq); in xive_cleanup_cpu_ipi()
1270 xive_ops->put_ipi(cpu, xc); in xive_cleanup_cpu_ipi()
1303 return 0; in xive_irq_domain_map()
1316 *out_hwirq = intspec[0]; in xive_irq_domain_xlate()
1330 return 0; in xive_irq_domain_xlate()
1376 seq_printf(m, "%*sESB: %s\n", ind, "", esb_names[val & 0x3]); in xive_irq_domain_debug_show()
1381 seq_printf(m, "%*sTrigger: 0x%016llx\n", ind, "", xd->trig_page); in xive_irq_domain_debug_show()
1382 seq_printf(m, "%*sEOI: 0x%016llx\n", ind, "", xd->eoi_page); in xive_irq_domain_debug_show()
1383 seq_printf(m, "%*sFlags: 0x%llx\n", ind, "", xd->flags); in xive_irq_domain_debug_show()
1384 for (i = 0; i < ARRAY_SIZE(xive_irq_flags); i++) { in xive_irq_domain_debug_show()
1414 pr_debug("%s %d/0x%lx #%d\n", __func__, virq, hwirq, nr_irqs); in xive_irq_domain_alloc()
1416 for (i = 0; i < nr_irqs; i++) { in xive_irq_domain_alloc()
1435 return 0; in xive_irq_domain_alloc()
1445 for (i = 0; i < nr_irqs; i++) in xive_irq_domain_free()
1473 static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc) in xive_cleanup_cpu_queues() argument
1475 if (xc->queue[xive_irq_priority].qpage) in xive_cleanup_cpu_queues()
1476 xive_ops->cleanup_queue(cpu, xc, xive_irq_priority); in xive_cleanup_cpu_queues()
1479 static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc) in xive_setup_cpu_queues() argument
1481 int rc = 0; in xive_setup_cpu_queues()
1484 if (!xc->queue[xive_irq_priority].qpage) in xive_setup_cpu_queues()
1485 rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority); in xive_setup_cpu_queues()
1492 struct xive_cpu *xc; in xive_prepare_cpu() local
1494 xc = per_cpu(xive_cpu, cpu); in xive_prepare_cpu()
1495 if (!xc) { in xive_prepare_cpu()
1496 xc = kzalloc_node(sizeof(struct xive_cpu), in xive_prepare_cpu()
1498 if (!xc) in xive_prepare_cpu()
1500 xc->hw_ipi = XIVE_BAD_IRQ; in xive_prepare_cpu()
1501 xc->chip_id = XIVE_INVALID_CHIP_ID; in xive_prepare_cpu()
1503 xive_ops->prepare_cpu(cpu, xc); in xive_prepare_cpu()
1505 per_cpu(xive_cpu, cpu) = xc; in xive_prepare_cpu()
1509 return xive_setup_cpu_queues(cpu, xc); in xive_prepare_cpu()
1514 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_setup_cpu() local
1518 xive_ops->setup_cpu(smp_processor_id(), xc); in xive_setup_cpu()
1520 /* Set CPPR to 0xff to enable flow of interrupts */ in xive_setup_cpu()
1521 xc->cppr = 0xff; in xive_setup_cpu()
1522 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff); in xive_setup_cpu()
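CPPR is the gate for delivery: a notification reaches the CPU only if its priority is more favoured (numerically lower) than the current CPPR, so 0xff accepts everything and 0 masks everything, which is exactly how the teardown paths below quiesce a CPU. The comparison in one line:

	#include <stdint.h>
	#include <stdio.h>

	/* An event of priority prio is presented iff prio < cppr. */
	static int presented(uint8_t prio, uint8_t cppr)
	{
		return prio < cppr;
	}

	int main(void)
	{
		printf("%d\n", presented(7, 0xff));	/* 1: CPPR 0xff lets all through */
		printf("%d\n", presented(7, 0x00));	/* 0: CPPR 0 blocks everything */
		printf("%d\n", presented(7, 0x07));	/* 0: equal priority is blocked */
		return 0;
	}
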
1550 static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc) in xive_flush_cpu_queue() argument
1558 while ((irq = xive_scan_interrupts(xc, false)) != 0) { in xive_flush_cpu_queue()
1606 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_smp_disable_cpu() local
1612 /* Set CPPR to 0 to disable flow of interrupts */ in xive_smp_disable_cpu()
1613 xc->cppr = 0; in xive_smp_disable_cpu()
1614 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0); in xive_smp_disable_cpu()
1617 xive_flush_cpu_queue(cpu, xc); in xive_smp_disable_cpu()
1620 xc->cppr = 0xff; in xive_smp_disable_cpu()
1621 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff); in xive_smp_disable_cpu()
1626 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_flush_interrupt() local
1630 xive_flush_cpu_queue(cpu, xc); in xive_flush_interrupt()
1639 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_teardown_cpu() local
1642 /* Set CPPR to 0 to disable flow of interrupts */ in xive_teardown_cpu()
1643 xc->cppr = 0; in xive_teardown_cpu()
1644 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0); in xive_teardown_cpu()
1647 xive_ops->teardown_cpu(cpu, xc); in xive_teardown_cpu()
1651 xive_cleanup_cpu_ipi(cpu, xc); in xive_teardown_cpu()
1655 xive_cleanup_cpu_queues(cpu, xc); in xive_teardown_cpu()
1703 memset(qpage, 0, 1 << queue_shift); in xive_queue_page_alloc()
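The queue page is a power of two (1 << queue_shift), which is what lets the consumer wrap with a simple & q->msk, and zeroing it makes every entry's bit 31 equal to the initial toggle of 0, i.e. the ring starts out empty. A user-space analogue of the allocation (queue_alloc and its shift value are hypothetical):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static uint32_t *queue_alloc(unsigned int queue_shift, uint32_t *msk)
	{
		size_t bytes = 1u << queue_shift;
		uint32_t *qpage = aligned_alloc(bytes, bytes);

		if (!qpage)
			return NULL;
		/* All-zero entries read as invalid against a toggle of 0. */
		memset(qpage, 0, bytes);
		*msk = bytes / sizeof(uint32_t) - 1;
		return qpage;
	}

	int main(void)
	{
		uint32_t msk;
		uint32_t *q = queue_alloc(16, &msk);	/* a 64KB queue */

		if (q)
			printf("entries=%u msk=0x%x\n", msk + 1, msk);
		free(q);
		return 0;
	}
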
1720 if (strncmp(arg, "off", 3) == 0) { in xive_store_eoi_cmdline()
1731 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); in xive_debug_show_ipi() local
1734 if (xc) { in xive_debug_show_ipi()
1735 seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr); in xive_debug_show_ipi()
1741 xive_irq_data_dump(&xc->ipi_data, buffer, sizeof(buffer)); in xive_debug_show_ipi()
1742 seq_printf(m, "IPI=0x%08x %s", xc->hw_ipi, buffer); in xive_debug_show_ipi()
1760 seq_printf(m, "IRQ 0x%08x : no config rc=%d\n", hw_irq, rc); in xive_debug_show_irq()
1764 seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ", in xive_debug_show_irq()
1783 return 0; in xive_irq_debug_show()
1796 return 0; in xive_ipi_debug_show()
1806 for (i = 0; i < q->msk + 1; i++) { in xive_eq_debug_show_one()
1819 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); in xive_eq_debug_show() local
1821 if (xc) in xive_eq_debug_show()
1822 xive_eq_debug_show_one(m, &xc->queue[xive_irq_priority], in xive_eq_debug_show()
1824 return 0; in xive_eq_debug_show()
1863 return 0; in xive_core_debug_init()