Lines matching +full:0 +full:xc (full-word matches for "0" and "xc"), grouped below by the function each match falls in. All matches are from the powerpc XIVE interrupt controller core (arch/powerpc/sysdev/xive/common.c).
File-scope definitions:
   42  #define DBG_VERBOSE(fmt...) do { } while(0)
   75  #define XIVE_BAD_IRQ 0x7fffffff

In xive_read_eq():
   83   * or 0 if there is no new entry.
   92  return 0;
   97  return 0;
  105  if (q->idx == 0)
  109  return cur & 0x7fffffff;
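The fragments above come from the event-queue pop path. As a rough model of the surrounding logic (a sketch only: the field names qpage/msk/idx/toggle are taken from the driver, and the kernel version byte-swaps each entry with be32_to_cpup()):

#include <stdbool.h>
#include <stdint.h>

/* Minimal userspace model of one XIVE event queue. */
struct xive_q_model {
        uint32_t *qpage;   /* ring of EQ entries (big-endian in the HW) */
        uint32_t  msk;     /* index mask: number of entries minus one   */
        uint32_t  idx;     /* next slot to look at                      */
        uint32_t  toggle;  /* expected polarity of the valid bit        */
};

/*
 * Pop (or peek) one entry. Bit 31 of every entry is a generation bit:
 * a slot holds a new entry only while that bit differs from q->toggle,
 * and the expected polarity flips on every wrap of the ring, so the
 * consumer never has to clear consumed slots.
 */
static uint32_t eq_read(struct xive_q_model *q, bool just_peek)
{
        uint32_t cur;

        if (!q->qpage)
                return 0;
        cur = q->qpage[q->idx];  /* kernel: be32_to_cpup(q->qpage + q->idx) */

        /* Valid bit matches the toggle: nothing new in this slot. */
        if ((cur >> 31) == q->toggle)
                return 0;

        if (!just_peek) {
                q->idx = (q->idx + 1) & q->msk;
                if (q->idx == 0)        /* wrapped around: flip polarity */
                        q->toggle ^= 1;
        }

        /* Strip the generation bit; what remains is the IRQ number. */
        return cur & 0x7fffffff;
}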
In xive_scan_interrupts():
  119   * (0xff if none) and return what was found (0 if none).
  135  static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
  137  u32 irq = 0;
  138  u8 prio = 0;
  141  while (xc->pending_prio != 0) {
  144  prio = ffs(xc->pending_prio) - 1;
  148  irq = xive_read_eq(&xc->queue[prio], just_peek);
  166  xc->pending_prio &= ~(1 << prio);
  173  q = &xc->queue[prio];
  175  int p = atomic_xchg(&q->pending_count, 0);
  183  /* If nothing was found, set CPPR to 0xff */
  184  if (irq == 0)
  185  prio = 0xff;
  188  if (prio != xc->cppr) {
  190  xc->cppr = prio;
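Put together with eq_read() above, the scan loop reads roughly as follows. This is a sketch: it reuses the model types from the previous snippet and omits the pending_count reconciliation that source lines 173 and 175 hint at.

#include <strings.h>   /* ffs() */

struct xive_cpu_model {
        uint8_t pending_prio;           /* bit N set: queue N may be non-empty */
        uint8_t cppr;                   /* current processor priority          */
        struct xive_q_model queue[8];   /* one event queue per priority        */
};

/*
 * Scan from the most favored pending priority (lowest set bit) upward:
 * take the first IRQ found, clear a priority's pending bit once its
 * queue reads empty, and finally fold the result into the CPPR
 * (0xff, "accept anything", when nothing at all was found).
 */
static uint32_t scan_interrupts(struct xive_cpu_model *xc, bool just_peek)
{
        uint32_t irq = 0;
        uint8_t prio = 0;

        while (xc->pending_prio != 0) {
                prio = ffs(xc->pending_prio) - 1;

                irq = eq_read(&xc->queue[prio], just_peek);
                if (irq)
                        break;

                /* Queue drained: drop the pending bit, try the next prio. */
                xc->pending_prio &= ~(1u << prio);
        }

        if (irq == 0)
                prio = 0xff;            /* nothing found: fully open CPPR */

        if (prio != xc->cppr)
                xc->cppr = prio;        /* kernel also writes TM_CPPR in the TIMA */

        return irq;
}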
In xive_esb_read():
  210  val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);

In xmon_xive_do_dump():
  246  struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
  249  if (xc) {
  250  xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);
  254  u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
  256  xmon_printf("IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
  261  xive_dump_eq("EQ", &xc->queue[xive_irq_priority]);
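The PQ pair printed by the dump is the two-bit ESB state returned by an XIVE_ESB_GET load. A tiny decode of it, with bit values as in the kernel's asm/xive-regs.h (P is 0x2, Q is 0x1):

#include <stdint.h>
#include <stdio.h>

#define ESB_VAL_P 0x2   /* an event is pending (has been presented)     */
#define ESB_VAL_Q 0x1   /* a further trigger arrived and was remembered */

/* Render the state the way the xmon dump above does: "--", "P-", "PQ"... */
static void print_pq(uint64_t val)
{
        printf("PQ=%c%c\n",
               (val & ESB_VAL_P) ? 'P' : '-',
               (val & ESB_VAL_Q) ? 'Q' : '-');
}

int main(void)
{
        for (uint64_t v = 0; v < 4; v++)
                print_pq(v);
        return 0;
}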
In xmon_xive_get_irq_config():
  275  xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
  279  xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
  292  return 0;

In xive_get_irq():
  299  struct xive_cpu *xc = __this_cpu_read(xive_cpu);
  316  xive_ops->update_pending(xc);
  318  DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);
  321  irq = xive_scan_interrupts(xc, false);
  323  DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
  324  irq, xc->pending_prio);
  328  return 0;

In xive_do_queue_eoi():
  342  static void xive_do_queue_eoi(struct xive_cpu *xc)
  344  if (xive_scan_interrupts(xc, true) != 0) {
  345  DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
In xive_do_source_eoi():
  359  xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
  365   * on P9 DD1.0 needed a latch to be cleared in the LPC bridge
  395  out_be64(xd->trig_mmio, 0);
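The three matches above sit on different branches of the source-EOI routine. A sketch of how the branch selection works (flag names follow the driver; their values and the stubbed ESB accessors here are modeled, not the real MMIO):

#include <stdbool.h>
#include <stdint.h>

#define FLAG_STORE_EOI 0x1   /* source supports the "store EOI" write  */
#define FLAG_EOI_FW    0x2   /* firmware must help (e.g. P9 DD1.0 LPC) */
#define FLAG_LSI       0x4   /* level-triggered source                 */
#define ESB_VAL_Q      0x1

struct xive_irq_model {
        unsigned int flags;
        uint8_t pq;                    /* modeled ESB state       */
        volatile uint64_t *trig_mmio;  /* trigger page, if mapped */
};

/* Modeled ESB accesses: the real driver does MMIO loads/stores here. */
static uint8_t esb_set_pq_00(struct xive_irq_model *xd)
{
        uint8_t old = xd->pq;
        xd->pq = 0;                    /* clear both P and Q      */
        return old;
}
static void esb_load_eoi(struct xive_irq_model *xd)  { xd->pq = 0; }
static void esb_store_eoi(struct xive_irq_model *xd) { xd->pq = 0; }
static void firmware_eoi(uint32_t hw_irq)            { (void)hw_irq; }

static void source_eoi(uint32_t hw_irq, struct xive_irq_model *xd)
{
        if (xd->flags & FLAG_STORE_EOI) {
                esb_store_eoi(xd);                /* cheapest: one store  */
        } else if (hw_irq && (xd->flags & FLAG_EOI_FW)) {
                firmware_eoi(hw_irq);             /* extra HW work needed */
        } else if (xd->flags & FLAG_LSI) {
                esb_load_eoi(xd);                 /* LSI: EOI load cycle  */
        } else {
                /* MSI: clear P/Q; replay if a trigger was queued meanwhile. */
                if ((esb_set_pq_00(xd) & ESB_VAL_Q) && xd->trig_mmio)
                        *xd->trig_mmio = 0;       /* kernel: out_be64()   */
        }
}

The later xive_irq_retrigger() match (source line 842, xive_do_source_eoi(0, xd)) relies on this shape: a zero hw_irq skips the firmware branch, forcing the EOI through the ESB path so the interrupt is replayed.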
In xive_irq_eoi():
  404  struct xive_cpu *xc = __this_cpu_read(xive_cpu);
  406  DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
  407  d->irq, irqd_to_hwirq(d), xc->pending_prio);
  426  xive_do_queue_eoi(xc);

In xive_try_pick_target():
  468  struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
  469  struct xive_q *q = &xc->queue[xive_irq_priority];

In xive_dec_target_count():
  492  struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
  493  struct xive_q *q = &xc->queue[xive_irq_priority];
  495  if (WARN_ON(cpu < 0 || !xc)) {
  496  pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
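The pick/dec pair maintains a per-queue counter so a target CPU's event queue is never oversubscribed. A sketch of that counting discipline with C11 atomics; the CAS loop is the userspace equivalent of the atomic_add_unless() the kernel uses on q->count:

#include <stdatomic.h>
#include <stdbool.h>

struct target_q {
        atomic_int count;  /* interrupts currently routed at this CPU/prio */
        int max;           /* queue capacity, minus a safety slot          */
};

/* Atomically take a slot unless the queue is already full. */
static bool try_pick_target(struct target_q *q)
{
        int old = atomic_load(&q->count);

        do {
                if (old >= q->max)
                        return false;  /* full: caller tries another CPU */
        } while (!atomic_compare_exchange_weak(&q->count, &old, old + 1));

        return true;
}

/* Give the slot back when the IRQ is shut down or retargeted. */
static void dec_target_count(struct target_q *q)
{
        atomic_fetch_sub(&q->count, 1);
}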
In xive_find_target_in_mask():
  522  for (i = 0; i < first && cpu < nr_cpu_ids; i++)

In xive_pick_irq_target():
  573  struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
  574  if (xc->chip_id == xd->src_chip)
  583  if (cpu >= 0)

In xive_irq_startup():
  600  pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
  643  return 0;

In xive_irq_shutdown():
  652  pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
  667  0xff, XIVE_BAD_IRQ);

In xive_irq_unmask():
  682   * be fixed by P9 DD2.0, if that is the case, firmware

In xive_irq_mask():
  705   * be fixed by P9 DD2.0, if that is the case, firmware
  712  0xff, d->irq);
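Both comments refer to sources whose ESB mask logic is unusable on early P9 parts, so masking is delegated to firmware by reconfiguring the source. A sketch of that fallback, with configure_irq() as a stub standing in for xive_ops->configure_irq():

#include <stdbool.h>
#include <stdint.h>

#define PRIO_MASKED 0xff   /* priority 0xff is never delivered */

/* Stub for xive_ops->configure_irq(hw_irq, target, prio, sw_irq). */
static int configure_irq(uint32_t hw_irq, uint32_t target,
                         uint8_t prio, uint32_t sw_irq)
{
        (void)hw_irq; (void)target; (void)prio; (void)sw_irq;
        return 0;          /* the real backends call OPAL or an hcall */
}

/* Firmware-assisted masking: route the source at priority 0xff to
 * mask it, restore the real priority to unmask. */
static int set_fw_masked(uint32_t hw_irq, uint32_t target,
                         uint8_t real_prio, uint32_t lirq, bool mask)
{
        return configure_irq(hw_irq, target,
                             mask ? PRIO_MASKED : real_prio, lirq);
}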
In xive_irq_set_affinity():
  726  int rc = 0;
  768  if (rc < 0) {
  773  pr_devel(" target: 0x%x\n", target);

In xive_irq_set_type():
  813  pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",

In xive_irq_retrigger():
  828  return 0;
  837   * Note: We pass "0" to the hw_irq argument in order to
  842  xive_do_source_eoi(0, xd);

In xive_irq_set_vcpu_affinity():
  887  return 0;
  926  return 0;
  967  return 0;

In xive_get_irqchip_state():
  981  return 0;

In xive_irq_alloc_data():
 1038  return 0;

In xive_cause_ipi():
 1056  struct xive_cpu *xc;
 1059  xc = per_cpu(xive_cpu, cpu);
 1061  DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
 1062  smp_processor_id(), cpu, xc->hw_ipi);
 1064  xd = &xc->ipi_data;
 1067  out_be64(xd->trig_mmio, 0);
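Causing an IPI is just a store to the target CPU's trigger page; the value written is ignored by the hardware. A minimal model:

#include <stdint.h>

struct ipi_model {
        uint32_t hw_ipi;               /* HW IRQ number (for tracing) */
        volatile uint64_t *trig_mmio;  /* mapped ESB trigger page     */
};

/* Fire the IPI at the CPU owning xd: any store to the trigger page
 * latches an event. The kernel uses out_be64(xd->trig_mmio, 0) to get
 * the required byte order and MMIO ordering semantics. */
static void cause_ipi(struct ipi_model *xd)
{
        if (xd->trig_mmio)
                *xd->trig_mmio = 0;
}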
In xive_ipi_eoi():
 1077  struct xive_cpu *xc = __this_cpu_read(xive_cpu);
 1080  if (!xc)
 1083  DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
 1084  d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);
 1086  xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
 1087  xive_do_queue_eoi(xc);

In xive_request_ipi():
 1118  virq = irq_create_mapping(xive_irq_domain, 0);

In xive_setup_cpu_ipi():
 1127  struct xive_cpu *xc;
 1132  xc = per_cpu(xive_cpu, cpu);
 1135  if (xc->hw_ipi != 0)
 1136  return 0;
 1138  /* Grab an IPI from the backend, this will populate xc->hw_ipi */
 1139  if (xive_ops->get_ipi(cpu, xc))
 1146  rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
 1151  rc = xive_ops->configure_irq(xc->hw_ipi,
 1159  xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);
 1162  xive_do_source_set_mask(&xc->ipi_data, false);
 1164  return 0;
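The bring-up sequence visible above is: allocate a HW IPI, populate its ESB data, route it to this CPU at the driver's single priority, then unmask. A condensed model with stubbed backend hooks; all names here are illustrative stand-ins for the xive_ops entry points:

#include <stdbool.h>
#include <stdint.h>

struct cpu_ipi_model {
        uint32_t hw_ipi;               /* 0 means "not allocated yet" */
        volatile uint64_t *trig_mmio;
};

/* Stubs standing in for the backend (OPAL calls or hcalls). */
static int backend_get_ipi(unsigned int cpu, struct cpu_ipi_model *xc)
{
        xc->hw_ipi = 0x100 + cpu;      /* pretend allocation */
        return 0;
}
static int backend_populate(struct cpu_ipi_model *xc) { (void)xc; return 0; }
static int backend_configure(uint32_t hw, uint32_t target,
                             uint8_t prio, uint32_t sw)
{
        (void)hw; (void)target; (void)prio; (void)sw;
        return 0;
}
static void source_set_mask(struct cpu_ipi_model *xc, bool mask)
{
        (void)xc; (void)mask;
}

static int setup_cpu_ipi(unsigned int cpu, uint32_t target, uint8_t prio,
                         uint32_t ipi_virq, struct cpu_ipi_model *xc)
{
        int rc;

        if (xc->hw_ipi != 0)
                return 0;                        /* already initialized */

        rc = backend_get_ipi(cpu, xc);           /* 1. populate hw_ipi  */
        if (rc)
                return rc;
        rc = backend_populate(xc);               /* 2. map ESB pages    */
        if (rc)
                return rc;
        rc = backend_configure(xc->hw_ipi, target, prio, ipi_virq); /* 3. route */
        if (rc)
                return rc;

        source_set_mask(xc, false);              /* 4. unmask           */
        return 0;
}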
In xive_cleanup_cpu_ipi():
 1167  static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
 1172  if (xc->hw_ipi == 0)
 1176  xive_do_source_set_mask(&xc->ipi_data, true);
 1185  xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
 1186  0xff, xive_ipi_irq);
 1189  xive_ops->put_ipi(cpu, xc);

In xive_irq_domain_map():
 1217  /* IPIs are special and come up with HW number 0 */
 1218  if (hw == 0) {
 1225  return 0;
 1235  return 0;

In xive_irq_domain_xlate():
 1256  *out_hwirq = intspec[0];
 1270  return 0;

In xive_cleanup_cpu_queues():
 1295  static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
 1297  if (xc->queue[xive_irq_priority].qpage)
 1298  xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);

In xive_setup_cpu_queues():
 1301  static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
 1303  int rc = 0;
 1306  if (!xc->queue[xive_irq_priority].qpage)
 1307  rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);

In xive_prepare_cpu():
 1314  struct xive_cpu *xc;
 1316  xc = per_cpu(xive_cpu, cpu);
 1317  if (!xc) {
 1320  xc = kzalloc_node(sizeof(struct xive_cpu),
 1322  if (!xc)
 1326  xc->chip_id = of_get_ibm_chip_id(np);
 1329  per_cpu(xive_cpu, cpu) = xc;
 1333  return xive_setup_cpu_queues(cpu, xc);

In xive_setup_cpu():
 1338  struct xive_cpu *xc = __this_cpu_read(xive_cpu);
 1342  xive_ops->setup_cpu(smp_processor_id(), xc);
 1344  /* Set CPPR to 0xff to enable flow of interrupts */
 1345  xc->cppr = 0xff;
 1346  out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
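The CPPR byte in the thread interrupt management area (TIMA) is the delivery gate: an event is presented only if its priority is more favored (numerically lower) than the CPPR. Writing 0xff therefore opens the CPU to everything and 0 shuts it off, which is how the setup and teardown paths here use it. A toy model:

#include <stdbool.h>
#include <stdint.h>

/* Model of the per-thread CPPR byte in the TIMA. */
struct tima_model {
        volatile uint8_t cppr;
};

static void accept_all(struct tima_model *tm)  { tm->cppr = 0xff; }
static void accept_none(struct tima_model *tm) { tm->cppr = 0x00; }

/* Delivery rule: priority p is presented only if strictly more
 * favored (lower) than the current CPPR. */
static bool would_deliver(const struct tima_model *tm, uint8_t p)
{
        return p < tm->cppr;
}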
In xive_flush_cpu_queue():
 1374  static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
 1382  while ((irq = xive_scan_interrupts(xc, false)) != 0) {
 1396  if (d->domain != xive_irq_domain || hw_irq == 0)

In xive_smp_disable_cpu():
 1431  struct xive_cpu *xc = __this_cpu_read(xive_cpu);
 1437  /* Set CPPR to 0 to disable flow of interrupts */
 1438  xc->cppr = 0;
 1439  out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
 1442  xive_flush_cpu_queue(cpu, xc);
 1445  xc->cppr = 0xff;
 1446  out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);

In xive_flush_interrupt():
 1451  struct xive_cpu *xc = __this_cpu_read(xive_cpu);
 1455  xive_flush_cpu_queue(cpu, xc);

In xive_teardown_cpu():
 1464  struct xive_cpu *xc = __this_cpu_read(xive_cpu);
 1467  /* Set CPPR to 0 to disable flow of interrupts */
 1468  xc->cppr = 0;
 1469  out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
 1472  xive_ops->teardown_cpu(cpu, xc);
 1476  xive_cleanup_cpu_ipi(cpu, xc);
 1480  xive_cleanup_cpu_queues(cpu, xc);

In xive_queue_page_alloc():
 1528  memset(qpage, 0, 1 << queue_shift);

In xive_off():
 1536  return 0;