// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/kmemleak.h>

#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
#include <asm/kvm_ppc.h>

#include "xive-internal.h"

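/*
 * Static state of the native (PowerNV/OPAL) backend:
 * - xive_provision_* track the pages the kernel donates to OPAL when the
 *   firmware asks for additional backing pages during VP block allocation.
 * - xive_queue_shift is the event queue size (log2, in bytes) picked from
 *   the "ibm,xive-eq-sizes" device tree property.
 * - xive_pool_vps is the base of the pool VP block set up for the HV pool
 *   contexts used by KVM.
 * - xive_has_single_esc records firmware support for single escalation.
 */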
static u32 xive_provision_size;
static u32 *xive_provision_chips;
static u32 xive_provision_chip_count;
static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;
static bool xive_has_single_esc;

int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	__be64 flags, eoi_page, trig_page;
	__be32 esb_shift, src_chip;
	u64 opal_flags;
	s64 rc;

	memset(data, 0, sizeof(*data));

	rc = opal_xive_get_irq_info(hw_irq, &flags, &eoi_page, &trig_page,
				    &esb_shift, &src_chip);
	if (rc) {
		pr_err("opal_xive_get_irq_info(0x%x) returned %lld\n",
		       hw_irq, rc);
		return -EINVAL;
	}

	opal_flags = be64_to_cpu(flags);
	if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (opal_flags & OPAL_XIVE_IRQ_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	if (opal_flags & OPAL_XIVE_IRQ_SHIFT_BUG)
		data->flags |= XIVE_IRQ_FLAG_SHIFT_BUG;
	if (opal_flags & OPAL_XIVE_IRQ_MASK_VIA_FW)
		data->flags |= XIVE_IRQ_FLAG_MASK_FW;
	if (opal_flags & OPAL_XIVE_IRQ_EOI_VIA_FW)
		data->flags |= XIVE_IRQ_FLAG_EOI_FW;
	data->eoi_page = be64_to_cpu(eoi_page);
	data->trig_page = be64_to_cpu(trig_page);
	data->esb_shift = be32_to_cpu(esb_shift);
	data->src_chip = be32_to_cpu(src_chip);

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	data->hw_irq = hw_irq;

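	/*
	 * Without a trigger page the source cannot be retriggered via MMIO;
	 * when the trigger and EOI pages coincide, reuse the existing mapping.
	 */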
	if (!data->trig_page)
		return 0;
	if (data->trig_page == data->eoi_page) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);

int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc == 0 ? 0 : -ENXIO;
}
EXPORT_SYMBOL_GPL(xive_native_configure_irq);

static int xive_native_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
				      u32 *sw_irq)
{
	s64 rc;
	__be64 vp;
	__be32 lirq;

	rc = opal_xive_get_irq_config(hw_irq, &vp, prio, &lirq);

	*target = be64_to_cpu(vp);
	*sw_irq = be32_to_cpu(lirq);

	return rc == 0 ? 0 : -ENXIO;
}

/* This can be called multiple times to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
				__be32 *qpage, u32 order, bool can_escalate)
{
	s64 rc = 0;
	__be64 qeoi_page_be;
	__be32 esc_irq_be;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else
		qpage_phys = 0;

	/* Initialize the rest of the fields */
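	/*
	 * Queue entries are 4-byte (__be32) words, so a 2^order byte queue
	 * holds 2^(order - 2) entries; msk is the resulting index mask.
	 */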
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = opal_xive_get_queue_info(vp_id, prio, NULL, NULL,
				      &qeoi_page_be,
				      &esc_irq_be,
				      NULL);
	if (rc) {
		pr_err("Error %lld getting queue info prio %d\n", rc, prio);
		rc = -EIO;
		goto fail;
	}
	q->eoi_phys = be64_to_cpu(qeoi_page_be);

	/* Default flags */
	flags = OPAL_XIVE_EQ_ALWAYS_NOTIFY | OPAL_XIVE_EQ_ENABLED;

	/* Escalation needed ? */
	if (can_escalate) {
		q->esc_irq = be32_to_cpu(esc_irq_be);
		flags |= OPAL_XIVE_EQ_ESCALATE;
	}

	/* Configure and enable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc) {
		pr_err("Error %lld setting queue for prio %d\n", rc, prio);
		rc = -EIO;
	} else {
		/*
		 * KVM code requires all of the above to be visible before
		 * q->qpage is set due to how it manages IPI EOIs
		 */
		wmb();
		q->qpage = qpage;
	}
fail:
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_configure_queue);

static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	s64 rc;

	/* Disable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc)
		pr_err("Error %lld disabling queue for prio %d\n", rc, prio);
}

void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	__xive_native_disable_queue(vp_id, q, prio);
}
EXPORT_SYMBOL_GPL(xive_native_disable_queue);

static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
					   q, prio, qpage, xive_queue_shift, false);
}

static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;

	/*
	 * We use the variant with no iounmap as this is called on exec
	 * from an IPI and iounmap isn't safe
	 */
	__xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
	alloc_order = xive_alloc_order(xive_queue_shift);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_native_match(struct device_node *node)
{
	return of_device_is_compatible(node, "ibm,opal-xive-vc");
}

static s64 opal_xive_allocate_irq(u32 chip_id)
{
	s64 irq = opal_xive_allocate_irq_raw(chip_id);

	/*
	 * Old versions of skiboot can incorrectly return 0xffffffff to
	 * indicate no space, fix it up here.
	 */
	return irq == 0xffffffff ? OPAL_RESOURCE : irq;
}

#ifdef CONFIG_SMP
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 irq;

	/* Allocate an IPI and populate info about it */
	for (;;) {
		irq = opal_xive_allocate_irq(xc->chip_id);
		if (irq == OPAL_BUSY) {
			msleep(OPAL_BUSY_DELAY_MS);
			continue;
		}
		if (irq < 0) {
			pr_err("Failed to allocate IPI on CPU %d\n", cpu);
			return -ENXIO;
		}
		xc->hw_ipi = irq;
		break;
	}
	return 0;
}
#endif /* CONFIG_SMP */

u32 xive_native_alloc_irq_on_chip(u32 chip_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_allocate_irq(chip_id);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc < 0)
		return 0;
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_alloc_irq_on_chip);

void xive_native_free_irq(u32 irq)
{
	for (;;) {
		s64 rc = opal_xive_free_irq(irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
}
EXPORT_SYMBOL_GPL(xive_native_free_irq);

#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;

	/* Free the IPI */
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;
	for (;;) {
		rc = opal_xive_free_irq(xc->hw_ipi);
		if (rc == OPAL_BUSY) {
			msleep(OPAL_BUSY_DELAY_MS);
			continue;
		}
		xc->hw_ipi = XIVE_BAD_IRQ;
		break;
	}
}
#endif /* CONFIG_SMP */

static void xive_native_shutdown(void)
{
	/* Switch the XIVE to emulation mode */
	opal_xive_reset(OPAL_XIVE_MODE_EMU);
}

/*
 * Perform an "ack" cycle on the current thread, thus
 * grabbing the pending active priorities and updating
 * the CPPR to the most favored one.
 */
static void xive_native_update_pending(struct xive_cpu *xc)
{
	u8 he, cppr;
	u16 ack;

	/* Perform the acknowledge hypervisor to register cycle */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_HV_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "HE" field which indicates the source
	 * of the hypervisor interrupt (if any)
	 */
	cppr = ack & 0xff;
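	/* The NSR is the upper byte of the ACK; HE lives in its top two bits */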
	he = (ack >> 8) >> 6;
	switch (he) {
	case TM_QW3_NSR_HE_NONE: /* Nothing to see here */
		break;
	case TM_QW3_NSR_HE_PHYS: /* Physical thread interrupt */
		if (cppr == 0xff)
			return;
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
		break;
	case TM_QW3_NSR_HE_POOL: /* HV Pool interrupt (unused) */
	case TM_QW3_NSR_HE_LSI: /* Legacy FW LSI (unused) */
		pr_err("CPU %d got unexpected interrupt type HE=%d\n",
		       smp_processor_id(), he);
		return;
	}
}

static void xive_native_eoi(u32 hw_irq)
{
	/*
	 * Not normally used except if specific interrupts need
	 * a workaround on EOI.
	 */
	opal_int_eoi(hw_irq);
}

static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;
	__be64 vp_cam_be;
	u64 vp_cam;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Check if pool VP already active, if it is, pull it */
	if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
		in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Enable the pool VP */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc) {
		pr_err("Failed to enable pool VP on CPU %d\n", cpu);
		return;
	}

	/* Grab its CAM value */
	rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
	if (rc) {
		pr_err("Failed to get pool VP info CPU %d\n", cpu);
		return;
	}
	vp_cam = be64_to_cpu(vp_cam_be);

	/* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | vp_cam);
}

static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Pull the pool VP from the CPU */
	in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Disable it */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
}

void xive_native_sync_source(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_source);

void xive_native_sync_queue(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_QUEUE, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_queue);

static const struct xive_ops xive_native_ops = {
	.populate_irq_data = xive_native_populate_irq_data,
	.configure_irq = xive_native_configure_irq,
	.get_irq_config = xive_native_get_irq_config,
	.setup_queue = xive_native_setup_queue,
	.cleanup_queue = xive_native_cleanup_queue,
	.match = xive_native_match,
	.shutdown = xive_native_shutdown,
	.update_pending = xive_native_update_pending,
	.eoi = xive_native_eoi,
	.setup_cpu = xive_native_setup_cpu,
	.teardown_cpu = xive_native_teardown_cpu,
	.sync_source = xive_native_sync_source,
#ifdef CONFIG_SMP
	.get_ipi = xive_native_get_ipi,
	.put_ipi = xive_native_put_ipi,
#endif /* CONFIG_SMP */
	.name = "native",
};

static bool xive_parse_provisioning(struct device_node *np)
{
	int rc;

	if (of_property_read_u32(np, "ibm,xive-provision-page-size",
				 &xive_provision_size) < 0)
		return true;
	rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4);
	if (rc < 0) {
		pr_err("Error %d getting provision chips array\n", rc);
		return false;
	}
	xive_provision_chip_count = rc;
	if (rc == 0)
		return true;

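	/* The property is an array of 4-byte cells, one chip id per cell */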
	xive_provision_chips = kcalloc(4, xive_provision_chip_count,
				       GFP_KERNEL);
	if (WARN_ON(!xive_provision_chips))
		return false;

	rc = of_property_read_u32_array(np, "ibm,xive-provision-chips",
					xive_provision_chips,
					xive_provision_chip_count);
	if (rc < 0) {
		pr_err("Error %d reading provision chips array\n", rc);
		return false;
	}

	xive_provision_cache = kmem_cache_create("xive-provision",
						 xive_provision_size,
						 xive_provision_size,
						 0, NULL);
	if (!xive_provision_cache) {
		pr_err("Failed to allocate provision cache\n");
		return false;
	}
	return true;
}

static void xive_native_setup_pools(void)
{
	/* Allocate a pool big enough */
	pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids);

	xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
	if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
		pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");

	pr_debug("XIVE: Pool VPs allocated at 0x%x for %u max CPUs\n",
		 xive_pool_vps, nr_cpu_ids);
}

u32 xive_native_default_eq_shift(void)
{
	return xive_queue_shift;
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);

unsigned long xive_tima_os;
EXPORT_SYMBOL_GPL(xive_tima_os);

bool __init xive_native_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio = 7;
	const __be32 *p;
	u32 val, cpu;
	s64 rc;

	if (xive_cmdline_disabled)
		return false;

	pr_devel("xive_native_init()\n");
	np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe");
	if (!np) {
		pr_devel("not found !\n");
		return false;
	}
	pr_devel("Found %pOF\n", np);

	/* Resource 1 is HV window */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		return false;
	}

	/* Read number of priorities */
	if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0)
		max_prio = val - 1;

	/* Iterate the EQ sizes and pick one */
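	/* Prefer a size equal to PAGE_SHIFT; otherwise the last listed size wins */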
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Do we support single escalation */
	if (of_get_property(np, "single-escalation-support", NULL) != NULL)
		xive_has_single_esc = true;

	/* Configure Thread Management areas for KVM */
	for_each_possible_cpu(cpu)
		kvmppc_set_xive_tima(cpu, r.start, tima);

	/* Resource 2 is OS window */
	if (of_address_to_resource(np, 2, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}

	xive_tima_os = r.start;

	/* Grab size of provisioning pages */
	xive_parse_provisioning(np);

	/* Switch the XIVE to exploitation mode */
	rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
	if (rc) {
		pr_err("Switch to exploitation mode failed with error %lld\n", rc);
		return false;
	}

	/* Setup some dummy HV pool VPs */
	xive_native_setup_pools();

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS,
			    max_prio)) {
		opal_xive_reset(OPAL_XIVE_MODE_EMU);
		return false;
	}
	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;
}

static bool xive_native_provision_pages(void)
{
	u32 i;
	void *p;

	for (i = 0; i < xive_provision_chip_count; i++) {
		u32 chip = xive_provision_chips[i];

		/*
		 * XXX TODO: Try to make the allocation local to the node where
		 * the chip resides.
		 */
		p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL);
		if (!p) {
			pr_err("Failed to allocate provisioning page\n");
			return false;
		}
		kmemleak_ignore(p);
		opal_xive_donate_page(chip, __pa(p));
	}
	return true;
}

u32 xive_native_alloc_vp_block(u32 max_vcpus)
{
	s64 rc;
	u32 order;

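	/* Round max_vcpus up to a power of two; OPAL allocates VP blocks by order */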
	order = fls(max_vcpus) - 1;
	if (max_vcpus > (1 << order))
		order++;

	pr_debug("VP block alloc, for max VCPUs %d use order %d\n",
		 max_vcpus, order);

	for (;;) {
		rc = opal_xive_alloc_vp_block(order);
		switch (rc) {
		case OPAL_BUSY:
			msleep(OPAL_BUSY_DELAY_MS);
			break;
		case OPAL_XIVE_PROVISIONING:
			if (!xive_native_provision_pages())
				return XIVE_INVALID_VP;
			break;
		default:
			if (rc < 0) {
				pr_err("OPAL failed to allocate VCPUs order %d, err %lld\n",
				       order, rc);
				return XIVE_INVALID_VP;
			}
			return rc;
		}
	}
}
EXPORT_SYMBOL_GPL(xive_native_alloc_vp_block);

void xive_native_free_vp_block(u32 vp_base)
{
	s64 rc;

	if (vp_base == XIVE_INVALID_VP)
		return;

	rc = opal_xive_free_vp_block(vp_base);
	if (rc < 0)
		pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);

int xive_native_enable_vp(u32 vp_id, bool single_escalation)
{
	s64 rc;
	u64 flags = OPAL_XIVE_VP_ENABLED;

	if (single_escalation)
		flags |= OPAL_XIVE_VP_SINGLE_ESCALATION;
	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, flags, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_enable_vp);

int xive_native_disable_vp(u32 vp_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_disable_vp);

int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
{
	__be64 vp_cam_be;
	__be32 vp_chip_id_be;
	s64 rc;

	rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
	if (rc)
		return -EIO;
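	/* The CAM line value is in the low 32 bits of the VP CAM word */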
	*out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
	*out_chip_id = be32_to_cpu(vp_chip_id_be);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_info);

bool xive_native_has_single_escalation(void)
{
	return xive_has_single_esc;
}
EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);

int xive_native_get_queue_info(u32 vp_id, u32 prio,
			       u64 *out_qpage,
			       u64 *out_qsize,
			       u64 *out_qeoi_page,
			       u32 *out_escalate_irq,
			       u64 *out_qflags)
{
	__be64 qpage;
	__be64 qsize;
	__be64 qeoi_page;
	__be32 escalate_irq;
	__be64 qflags;
	s64 rc;

	rc = opal_xive_get_queue_info(vp_id, prio, &qpage, &qsize,
				      &qeoi_page, &escalate_irq, &qflags);
	if (rc) {
		pr_err("OPAL failed to get queue info for VCPU %d/%d : %lld\n",
		       vp_id, prio, rc);
		return -EIO;
	}

	if (out_qpage)
		*out_qpage = be64_to_cpu(qpage);
	if (out_qsize)
		*out_qsize = be32_to_cpu(qsize);
	if (out_qeoi_page)
		*out_qeoi_page = be64_to_cpu(qeoi_page);
	if (out_escalate_irq)
		*out_escalate_irq = be32_to_cpu(escalate_irq);
	if (out_qflags)
		*out_qflags = be64_to_cpu(qflags);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_info);

int xive_native_get_queue_state(u32 vp_id, u32 prio, u32 *qtoggle, u32 *qindex)
{
	__be32 opal_qtoggle;
	__be32 opal_qindex;
	s64 rc;

	rc = opal_xive_get_queue_state(vp_id, prio, &opal_qtoggle,
				       &opal_qindex);
	if (rc) {
		pr_err("OPAL failed to get queue state for VCPU %d/%d : %lld\n",
		       vp_id, prio, rc);
		return -EIO;
	}

	if (qtoggle)
		*qtoggle = be32_to_cpu(opal_qtoggle);
	if (qindex)
		*qindex = be32_to_cpu(opal_qindex);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_state);

int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
{
	s64 rc;

	rc = opal_xive_set_queue_state(vp_id, prio, qtoggle, qindex);
	if (rc) {
		pr_err("OPAL failed to set queue state for VCPU %d/%d : %lld\n",
		       vp_id, prio, rc);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_set_queue_state);

bool xive_native_has_queue_state_support(void)
{
	return opal_check_token(OPAL_XIVE_GET_QUEUE_STATE) &&
	       opal_check_token(OPAL_XIVE_SET_QUEUE_STATE);
}
EXPORT_SYMBOL_GPL(xive_native_has_queue_state_support);

int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
{
	__be64 state;
	s64 rc;

	rc = opal_xive_get_vp_state(vp_id, &state);
	if (rc) {
		pr_err("OPAL failed to get vp state for VCPU %d : %lld\n",
		       vp_id, rc);
		return -EIO;
	}

	if (out_state)
		*out_state = be64_to_cpu(state);
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_state);

machine_arch_initcall(powernv, xive_core_debug_init);