Lines Matching refs:xc
All uses of the per-CPU struct xive_cpu pointer (xc) in the powerpc XIVE interrupt controller core, arch/powerpc/sysdev/xive/common.c. Each entry gives the source line number, the matching line, and the enclosing function; the cross-reference tool marks the lines where xc is declared as an argument or a local.
139 static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek) in xive_scan_interrupts() argument
145 while (xc->pending_prio != 0) { in xive_scan_interrupts()
148 prio = ffs(xc->pending_prio) - 1; in xive_scan_interrupts()
152 irq = xive_read_eq(&xc->queue[prio], just_peek); in xive_scan_interrupts()
159 xc->pending_prio &= ~(1 << prio); in xive_scan_interrupts()
166 q = &xc->queue[prio]; in xive_scan_interrupts()
181 if (prio != xc->cppr) { in xive_scan_interrupts()
183 xc->cppr = prio; in xive_scan_interrupts()
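Pieced together, the fragments above give the shape of the scan loop: take the highest pending priority, try to pop an entry from that priority's event queue, clear the priority's pending bit when its queue runs dry, and finally resync the cached CPPR (current processor priority register) with what was found. A hedged reconstruction follows; the found-interrupt handling, the pending_count rebalancing, and the hardware CPPR write are inferred from context and marked in comments.

static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
{
	u32 irq = 0;
	u8 prio = 0;

	/* Walk pending priorities; lower number means higher priority */
	while (xc->pending_prio != 0) {
		struct xive_q *q;

		prio = ffs(xc->pending_prio) - 1;

		/* Try to pop (or, when just_peek, only look at) an entry */
		irq = xive_read_eq(&xc->queue[prio], just_peek);

		if (irq)	/* inferred: stop at the first hit */
			break;

		/* Queue for this priority is empty: clear its pending bit */
		xc->pending_prio &= ~(1 << prio);

		/*
		 * Inferred: fold pending_count (interrupts migrated away
		 * from this CPU) back into the queue's in-flight count.
		 */
		q = &xc->queue[prio];
		if (atomic_read(&q->pending_count))
			atomic_sub(atomic_xchg(&q->pending_count, 0),
				   &q->count);
	}

	if (irq == 0)
		prio = 0xff;	/* inferred: nothing pending, open up fully */

	/* Resync the cached CPPR with the priority we settled on */
	if (prio != xc->cppr) {
		xc->cppr = prio;
		/* inferred: mirror prio into the hardware CPPR (TIMA) */
	}

	return irq;
}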
239 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); in xmon_xive_do_dump() local
242 xmon_printf(" pp=%02x cppr=%02x\n", xc->pending_prio, xc->cppr); in xmon_xive_do_dump()
243 xive_dump_eq("IRQ", &xc->queue[xive_irq_priority]); in xmon_xive_do_dump()
246 u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET); in xmon_xive_do_dump()
247 xmon_printf(" IPI state: %x:%c%c\n", xc->hw_ipi, in xmon_xive_do_dump()
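The xmon dump prints the software-tracked state (pending priorities and cached CPPR), dumps the single event queue, and reads the IPI's ESB (event state buffer) to show the hardware state. A minimal sketch, assuming the usual XIVE_ESB_VAL_P/XIVE_ESB_VAL_Q flag names; the exact rendering of the two state characters is inferred:

static void xmon_xive_do_dump(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	u64 val;

	xmon_printf(" pp=%02x cppr=%02x\n", xc->pending_prio, xc->cppr);
	xive_dump_eq("IRQ", &xc->queue[xive_irq_priority]);

	/* Hardware view: read the IPI's ESB state (P and Q bits) */
	val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
	xmon_printf(" IPI state: %x:%c%c\n", xc->hw_ipi,
		    val & XIVE_ESB_VAL_P ? 'P' : '-',	/* inferred */
		    val & XIVE_ESB_VAL_Q ? 'Q' : '-');	/* inferred */
}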
257 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_get_irq() local
274 xive_ops->update_pending(xc); in xive_get_irq()
276 DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio); in xive_get_irq()
279 irq = xive_scan_interrupts(xc, false); in xive_get_irq()
282 irq, xc->pending_prio); in xive_get_irq()
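xive_get_irq() is the ppc_md.get_irq hook: it first lets the backend refresh xc->pending_prio from the hardware, then consumes the highest-priority entry via a non-peek scan. A minimal sketch, with the return convention inferred (0 meaning nothing to service):

static u32 xive_get_irq(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	u32 irq;

	/* Backend updates xc->pending_prio from the hardware state */
	xive_ops->update_pending(xc);

	/* Pop the highest-priority pending interrupt, if any */
	irq = xive_scan_interrupts(xc, false);

	return irq;	/* inferred: 0 means nothing pending */
}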
300 static void xive_do_queue_eoi(struct xive_cpu *xc) in xive_do_queue_eoi() argument
302 if (xive_scan_interrupts(xc, true) != 0) { in xive_do_queue_eoi()
303 DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio); in xive_do_queue_eoi()
361 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_irq_eoi() local
364 d->irq, irqd_to_hwirq(d), xc->pending_prio); in xive_irq_eoi()
381 xive_do_queue_eoi(xc); in xive_irq_eoi()
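EOI is two-sided: the device path (xive_irq_eoi) EOIs the source, then calls xive_do_queue_eoi() to re-peek the queues. If the peek (just_peek == true, so nothing is consumed) still finds work, delivery must be re-kicked, since the CPU already took its one interrupt. A sketch; the replay call is inferred to be powerpc's force_external_irq_replay():

static void xive_do_queue_eoi(struct xive_cpu *xc)
{
	/* Peek only: check for remaining work without consuming it */
	if (xive_scan_interrupts(xc, true) != 0)
		force_external_irq_replay();	/* inferred */
}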
418 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); in xive_try_pick_target() local
419 struct xive_q *q = &xc->queue[xive_irq_priority]; in xive_try_pick_target()
442 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); in xive_dec_target_count() local
443 struct xive_q *q = &xc->queue[xive_irq_priority]; in xive_dec_target_count()
445 if (unlikely(WARN_ON(cpu < 0 || !xc))) { in xive_dec_target_count()
446 pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc); in xive_dec_target_count()
524 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); in xive_pick_irq_target() local
525 if (xc->chip_id == xd->src_chip) in xive_pick_irq_target()
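The three fragments above are the target-selection machinery: xive_try_pick_target() reserves a slot in a candidate CPU's queue by bumping q->count up to a cap, xive_dec_target_count() releases it, and xive_pick_irq_target() prefers CPUs on the interrupt's source chip (xc->chip_id vs. xd->src_chip). A sketch with the cap value inferred from the queue mask. One thing worth flagging editorially: as listed, xive_dec_target_count() computes q from xc (line 443) before the WARN_ON(!xc) check (line 445), so the check cannot fire before a NULL xc is already dereferenced; the sketch below reorders this.

static bool xive_try_pick_target(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];
	int max;

	/* Inferred cap: at most queue-size minus one in flight */
	max = (q->msk + 1) - 1;
	return !!atomic_add_unless(&q->count, 1, max);
}

static void xive_dec_target_count(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q;

	if (unlikely(WARN_ON(cpu < 0 || !xc))) {
		pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
		return;
	}

	/* Derive q only after xc has been validated */
	q = &xc->queue[xive_irq_priority];
	atomic_dec(&q->count);
}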
990 struct xive_cpu *xc; in xive_cause_ipi() local
993 xc = per_cpu(xive_cpu, cpu); in xive_cause_ipi()
996 smp_processor_id(), cpu, xc->hw_ipi); in xive_cause_ipi()
998 xd = &xc->ipi_data; in xive_cause_ipi()
1011 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_ipi_eoi() local
1014 d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio); in xive_ipi_eoi()
1017 if (!xc) in xive_ipi_eoi()
1019 xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data); in xive_ipi_eoi()
1020 xive_do_queue_eoi(xc); in xive_ipi_eoi()
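The IPI path: xive_cause_ipi() looks up the target CPU's xc and fires its IPI, and xive_ipi_eoi() EOIs the source and then re-scans the queues. The trigger mechanism in the sketch (a store to the interrupt's trigger MMIO page) is inferred. Note also that in the listing the debug print (line 1014) dereferences xc before the !xc guard (line 1017); that is harmless only while the verbose-debug macro compiles to nothing.

static void xive_cause_ipi(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_irq_data *xd = &xc->ipi_data;

	if (WARN_ON(!xd->trig_mmio))	/* inferred guard */
		return;
	out_be64(xd->trig_mmio, 0);	/* inferred: any store triggers */
}

static void xive_ipi_eoi(struct irq_data *d)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* Drop stale IPIs racing with CPU unplug */
	if (!xc)
		return;

	/* EOI the source, then check the queues for follow-on work */
	xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
	xive_do_queue_eoi(xc);
}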
1060 struct xive_cpu *xc; in xive_setup_cpu_ipi() local
1065 xc = per_cpu(xive_cpu, cpu); in xive_setup_cpu_ipi()
1068 if (xc->hw_ipi != 0) in xive_setup_cpu_ipi()
1072 if (xive_ops->get_ipi(cpu, xc)) in xive_setup_cpu_ipi()
1079 rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data); in xive_setup_cpu_ipi()
1084 rc = xive_ops->configure_irq(xc->hw_ipi, in xive_setup_cpu_ipi()
1092 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio); in xive_setup_cpu_ipi()
1095 xive_do_source_set_mask(&xc->ipi_data, false); in xive_setup_cpu_ipi()
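IPI bring-up follows a fixed sequence: skip if this CPU already has one (hw_ipi != 0), ask the backend for a hardware IPI number, resolve its ESB/trigger data, route it to this CPU at the one priority XIVE uses, and unmask it. A sketch; the error returns and the hard-CPU-id helper are inferred:

static int xive_setup_cpu_ipi(unsigned int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	int rc;

	/* Already set up? In this version 0 means "no IPI yet" */
	if (xc->hw_ipi != 0)
		return 0;

	/* Backend allocates a hardware IPI and fills in xc->hw_ipi */
	if (xive_ops->get_ipi(cpu, xc))
		return -EIO;	/* inferred error value */

	/* Resolve ESB and trigger pages for that hardware interrupt */
	rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
	if (rc)
		return rc;

	/* Target it at this CPU, at the single priority in use */
	rc = xive_ops->configure_irq(xc->hw_ipi,
				     get_hard_smp_processor_id(cpu), /* inferred */
				     xive_irq_priority, xive_ipi_irq);
	if (rc)
		return rc;

	/* Let it fire */
	xive_do_source_set_mask(&xc->ipi_data, false);
	return 0;
}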
1100 static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc) in xive_cleanup_cpu_ipi() argument
1105 if (xc->hw_ipi == 0) in xive_cleanup_cpu_ipi()
1109 xive_do_source_set_mask(&xc->ipi_data, true); in xive_cleanup_cpu_ipi()
1118 xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(), in xive_cleanup_cpu_ipi()
1122 xive_ops->put_ipi(cpu, xc); in xive_cleanup_cpu_ipi()
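Cleanup mirrors setup in reverse, and nearly every line is visible above: bail if no IPI was allocated, mask the source, deconfigure it in the backend (the 0xff priority argument at line 1118 reads as masked/invalid), and return the hardware IPI. Only the comments are added here:

static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	/* Nothing was ever allocated for this CPU */
	if (xc->hw_ipi == 0)
		return;

	/* Mask the source first so it can no longer fire */
	xive_do_source_set_mask(&xc->ipi_data, true);

	/* Deconfigure: retarget at the masked priority 0xff (inferred meaning) */
	xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
				0xff, xive_ipi_irq);

	/* Hand the hardware IPI back to the backend */
	xive_ops->put_ipi(cpu, xc);
}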
1228 static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc) in xive_cleanup_cpu_queues() argument
1230 if (xc->queue[xive_irq_priority].qpage) in xive_cleanup_cpu_queues()
1231 xive_ops->cleanup_queue(cpu, xc, xive_irq_priority); in xive_cleanup_cpu_queues()
1234 static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc) in xive_setup_cpu_queues() argument
1239 if (!xc->queue[xive_irq_priority].qpage) in xive_setup_cpu_queues()
1240 rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority); in xive_setup_cpu_queues()
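Queue management is deliberately minimal: this version provisions a single event queue per CPU, at xive_irq_priority, and uses queue[prio].qpage as the already-initialized marker. Completing the two helpers from the lines shown (only comments are added):

static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
	int rc = 0;

	/* One priority in use, hence one queue to provision */
	if (!xc->queue[xive_irq_priority].qpage)
		rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);

	return rc;
}

static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
	if (xc->queue[xive_irq_priority].qpage)
		xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
}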
1247 struct xive_cpu *xc; in xive_prepare_cpu() local
1249 xc = per_cpu(xive_cpu, cpu); in xive_prepare_cpu()
1250 if (!xc) { in xive_prepare_cpu()
1253 xc = kzalloc_node(sizeof(struct xive_cpu), in xive_prepare_cpu()
1255 if (!xc) in xive_prepare_cpu()
1259 xc->chip_id = of_get_ibm_chip_id(np); in xive_prepare_cpu()
1262 per_cpu(xive_cpu, cpu) = xc; in xive_prepare_cpu()
1266 return xive_setup_cpu_queues(cpu, xc); in xive_prepare_cpu()
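xive_prepare_cpu() is the hotplug prepare step: allocate the per-CPU structure on the CPU's own NUMA node the first time through, record which chip the CPU lives on (used later by xive_pick_irq_target()), and make sure its queue exists. A sketch; the device-tree lookup around the chip-id call is inferred:

static int xive_prepare_cpu(unsigned int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

	if (!xc) {
		struct device_node *np;

		/* First time: allocate on the CPU's own NUMA node */
		xc = kzalloc_node(sizeof(struct xive_cpu),
				  GFP_KERNEL, cpu_to_node(cpu));
		if (!xc)
			return -ENOMEM;

		/* Remember the chip for source-affine target picking */
		np = of_get_cpu_node(cpu, NULL);	/* inferred lookup */
		if (np)
			xc->chip_id = of_get_ibm_chip_id(np);
		of_node_put(np);

		per_cpu(xive_cpu, cpu) = xc;
	}

	/* Provision the event queue if it does not exist yet */
	return xive_setup_cpu_queues(cpu, xc);
}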
1271 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_setup_cpu() local
1275 xive_ops->setup_cpu(smp_processor_id(), xc); in xive_setup_cpu()
1278 xc->cppr = 0xff; in xive_setup_cpu()
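xive_setup_cpu() runs on the CPU itself: the backend gets its per-CPU hook, then the CPPR is opened to 0xff so interrupts of every priority can flow. A minimal sketch; whether the backend hook is optional, and the hardware mirror of the CPPR write, are inferred:

static void xive_setup_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* Backend-specific per-CPU setup (inferred to be optional) */
	if (xive_ops->setup_cpu)
		xive_ops->setup_cpu(smp_processor_id(), xc);

	/* Open the gate: priority 0xff lets everything through */
	xc->cppr = 0xff;
	/* inferred: mirror 0xff into the hardware CPPR (TIMA) */
}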
1307 static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc) in xive_flush_cpu_queue() argument
1315 while ((irq = xive_scan_interrupts(xc, false)) != 0) { in xive_flush_cpu_queue()
1359 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_smp_disable_cpu() local
1366 xc->cppr = 0; in xive_smp_disable_cpu()
1370 xive_flush_cpu_queue(cpu, xc); in xive_smp_disable_cpu()
1373 xc->cppr = 0xff; in xive_smp_disable_cpu()
1379 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_flush_interrupt() local
1383 xive_flush_cpu_queue(cpu, xc); in xive_flush_interrupt()
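CPU offlining closes the CPPR, drains whatever already landed in the queue via xive_flush_cpu_queue() (its loop at line 1315 pops every remaining entry), and then reopens the CPPR so the dying CPU can still take the IPIs the offline sequence needs. A sketch of the disable step; the interrupt-migration call and the hardware CPPR writes are inferred:

static void xive_smp_disable_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Inferred: move all device interrupts off this CPU first */
	irq_migrate_all_off_this_cpu();

	/* Close the CPPR so nothing new is delivered... */
	xc->cppr = 0;
	/* inferred: write 0 to the hardware CPPR (TIMA) */

	/* ...drain entries that landed before the gate closed... */
	xive_flush_cpu_queue(cpu, xc);

	/* ...then reopen it so IPIs still reach this CPU */
	xc->cppr = 0xff;
	/* inferred: write 0xff back to the hardware CPPR (TIMA) */
}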
1392 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_teardown_cpu() local
1396 xc->cppr = 0; in xive_teardown_cpu()
1400 xive_ops->teardown_cpu(cpu, xc); in xive_teardown_cpu()
1404 xive_cleanup_cpu_ipi(cpu, xc); in xive_teardown_cpu()
1408 xive_cleanup_cpu_queues(cpu, xc); in xive_teardown_cpu()
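Teardown is the strict reverse of bring-up, and all four steps appear in the listing: close the CPPR, give the backend its teardown hook, release the IPI, then the queues. A sketch; the optionality of the backend hook, the hardware CPPR write, and the CONFIG_SMP guard around the IPI cleanup are inferred:

void xive_teardown_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Close the CPPR: no further delivery to this CPU */
	xc->cppr = 0;
	/* inferred: write 0 to the hardware CPPR (TIMA) */

	/* Backend-specific teardown (inferred to be optional) */
	if (xive_ops->teardown_cpu)
		xive_ops->teardown_cpu(cpu, xc);

	/* Release the IPI, then the event queue */
	xive_cleanup_cpu_ipi(cpu, xc);		/* inferred: under CONFIG_SMP */
	xive_cleanup_cpu_queues(cpu, xc);
}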