Lines Matching refs:xive

125 if (xc->xive->single_escalation) in xive_attach_escalation()
157 if (xc->xive->single_escalation) { in xive_attach_escalation()
178 struct kvmppc_xive *xive = xc->xive; in xive_provision_queue() local
187 qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order); in xive_provision_queue()
193 memset(qpage, 0, 1 << xive->q_order); in xive_provision_queue()
203 xive->q_order, true); in xive_provision_queue()
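
In the provisioning lines above, q_order is the log2 of the event-queue size in bytes, and q_page_order (derived at lines 1800-1804 below) is the same quantity re-expressed as a page-allocation order for __get_free_pages(). A minimal user-space model of that arithmetic, assuming 4K pages (the kernel's PAGE_SHIFT depends on the build):

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumption: 4K pages for the example */

int main(void)
{
        unsigned int q_order = 16;      /* e.g. a 64KB event queue */
        unsigned int q_page_order =
                q_order < PAGE_SHIFT ? 0 : q_order - PAGE_SHIFT;

        /* __get_free_pages(GFP_KERNEL, q_page_order) returns
         * 1 << q_page_order pages, i.e. at least 1 << q_order bytes,
         * which xive_provision_queue() then zeroes with memset(). */
        printf("order %u -> %u pages, %u bytes\n",
               q_page_order, 1u << q_page_order, 1u << q_order);
        return 0;
}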
213 struct kvmppc_xive *xive = kvm->arch.xive; in xive_check_provisioning() local
220 if (xive->qmap & (1 << prio)) in xive_check_provisioning()
230 if (rc == 0 && !xive->single_escalation) in xive_check_provisioning()
238 xive->qmap |= (1 << prio); in xive_check_provisioning()
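
The two qmap references above encode provisioning state as a priority bitmap: bit N set means the per-vcpu queues for priority N already exist. A user-space model of the idiom (names are illustrative, not the kernel's API):

#include <stdbool.h>
#include <stdint.h>

static uint32_t qmap;   /* bit N set => queues for priority N are provisioned */

static bool prio_provisioned(unsigned int prio)
{
        return qmap & (1u << prio);     /* the check at line 220 */
}

static void mark_provisioned(unsigned int prio)
{
        qmap |= (1u << prio);           /* the update at line 238 */
}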
320 static u32 xive_vp(struct kvmppc_xive *xive, u32 server) in xive_vp() argument
322 return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server); in xive_vp()
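
xive_vp() is pure arithmetic: a guest server number becomes a hardware virtual-processor id by offsetting into the VP block allocated at device creation (vp_base, lines 1807-1810 below). A hedged sketch, assuming identity packing for simplicity (the real kvmppc_pack_vcpu_id() compresses sparse vcpu ids):

#include <stdint.h>

static uint32_t pack_vcpu_id(uint32_t server)
{
        return server;          /* assumption: identity packing */
}

static uint32_t xive_vp_model(uint32_t vp_base, uint32_t server)
{
        return vp_base + pack_vcpu_id(server);  /* line 322 */
}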
325 static u8 xive_lock_and_mask(struct kvmppc_xive *xive, in xive_lock_and_mask() argument
370 xive_vp(xive, state->act_server), in xive_lock_and_mask()
405 static void xive_finish_unmask(struct kvmppc_xive *xive, in xive_finish_unmask() argument
426 xive_vp(xive, state->act_server), in xive_finish_unmask()
466 struct kvmppc_xive *xive = kvm->arch.xive; in xive_target_interrupt() local
503 xive_vp(xive, server), in xive_target_interrupt()
550 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_set_xive() local
557 if (!xive) in kvmppc_xive_set_xive()
565 rc = xive_check_provisioning(xive->kvm, in kvmppc_xive_set_xive()
572 sb = kvmppc_xive_find_source(xive, irq, &idx); in kvmppc_xive_set_xive()
592 xive_lock_and_mask(xive, sb, state); in kvmppc_xive_set_xive()
633 xive_finish_unmask(xive, sb, state, priority); in kvmppc_xive_set_xive()
648 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_get_xive() local
653 if (!xive) in kvmppc_xive_get_xive()
656 sb = kvmppc_xive_find_source(xive, irq, &idx); in kvmppc_xive_get_xive()
670 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_int_on() local
675 if (!xive) in kvmppc_xive_int_on()
678 sb = kvmppc_xive_find_source(xive, irq, &idx); in kvmppc_xive_int_on()
701 xive_finish_unmask(xive, sb, state, state->saved_priority); in kvmppc_xive_int_on()
709 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_int_off() local
714 if (!xive) in kvmppc_xive_int_off()
717 sb = kvmppc_xive_find_source(xive, irq, &idx); in kvmppc_xive_int_off()
727 state->saved_priority = xive_lock_and_mask(xive, sb, state); in kvmppc_xive_int_off()
733 static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq) in xive_restore_pending_irq() argument
739 sb = kvmppc_xive_find_source(xive, irq, &idx); in xive_restore_pending_irq()
771 struct kvmppc_xive *xive = vcpu->kvm->arch.xive; in kvmppc_xive_set_icp() local
775 if (!xc || !xive) in kvmppc_xive_set_icp()
817 if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) { in kvmppc_xive_set_icp()
819 xive->delayed_irqs++; in kvmppc_xive_set_icp()
829 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_set_mapped() local
839 if (!xive) in kvmppc_xive_set_mapped()
844 sb = kvmppc_xive_find_source(xive, guest_irq, &idx); in kvmppc_xive_set_mapped()
873 prio = xive_lock_and_mask(xive, sb, state); in kvmppc_xive_set_mapped()
891 xive_vp(xive, state->act_server), in kvmppc_xive_set_mapped()
919 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_clr_mapped() local
927 if (!xive) in kvmppc_xive_clr_mapped()
932 sb = kvmppc_xive_find_source(xive, guest_irq, &idx); in kvmppc_xive_clr_mapped()
942 prio = xive_lock_and_mask(xive, sb, state); in kvmppc_xive_clr_mapped()
967 xive_vp(xive, state->act_server), in kvmppc_xive_clr_mapped()
993 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_disable_vcpu_interrupts() local
996 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_disable_vcpu_interrupts()
997 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in kvmppc_xive_disable_vcpu_interrupts()
1028 struct kvmppc_xive *xive = xc->xive; in kvmppc_xive_cleanup_vcpu() local
1057 xive->q_page_order); in kvmppc_xive_cleanup_vcpu()
1074 struct kvmppc_xive *xive = dev->private; in kvmppc_xive_connect_vcpu() local
1084 if (xive->kvm != vcpu->kvm) in kvmppc_xive_connect_vcpu()
1103 xc->xive = xive; in kvmppc_xive_connect_vcpu()
1106 xc->vp_id = xive_vp(xive, cpu); in kvmppc_xive_connect_vcpu()
1135 r = xive_native_enable_vp(xc->vp_id, xive->single_escalation); in kvmppc_xive_connect_vcpu()
1152 if (i == 7 && xive->single_escalation) in kvmppc_xive_connect_vcpu()
1156 if (xive->qmap & (1 << i)) { in kvmppc_xive_connect_vcpu()
1158 if (r == 0 && !xive->single_escalation) in kvmppc_xive_connect_vcpu()
1197 static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq) in xive_pre_save_set_queued() argument
1203 sb = kvmppc_xive_find_source(xive, irq, &idx); in xive_pre_save_set_queued()
1227 static void xive_pre_save_mask_irq(struct kvmppc_xive *xive, in xive_pre_save_mask_irq() argument
1237 state->saved_scan_prio = xive_lock_and_mask(xive, sb, state); in xive_pre_save_mask_irq()
1247 static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive, in xive_pre_save_unmask_irq() argument
1265 xive_finish_unmask(xive, sb, state, state->saved_scan_prio); in xive_pre_save_unmask_irq()
1271 static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q) in xive_pre_save_queue() argument
1280 xive_pre_save_set_queued(xive, irq); in xive_pre_save_queue()
1284 static void xive_pre_save_scan(struct kvmppc_xive *xive) in xive_pre_save_scan() argument
1293 for (i = 0; i <= xive->max_sbid; i++) { in xive_pre_save_scan()
1294 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in xive_pre_save_scan()
1298 xive_pre_save_mask_irq(xive, sb, j); in xive_pre_save_scan()
1302 kvm_for_each_vcpu(i, vcpu, xive->kvm) { in xive_pre_save_scan()
1308 xive_pre_save_queue(xive, &xc->queues[j]); in xive_pre_save_scan()
1313 for (i = 0; i <= xive->max_sbid; i++) { in xive_pre_save_scan()
1314 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in xive_pre_save_scan()
1318 xive_pre_save_unmask_irq(xive, sb, j); in xive_pre_save_scan()
1322 static void xive_post_save_scan(struct kvmppc_xive *xive) in xive_post_save_scan() argument
1327 for (i = 0; i <= xive->max_sbid; i++) { in xive_post_save_scan()
1328 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in xive_post_save_scan()
1336 xive->saved_src_count = 0; in xive_post_save_scan()
1342 static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr) in xive_get_source() argument
1350 sb = kvmppc_xive_find_source(xive, irq, &idx); in xive_get_source()
1377 if (xive->saved_src_count == 0) in xive_get_source()
1378 xive_pre_save_scan(xive); in xive_get_source()
1379 xive->saved_src_count++; in xive_get_source()
1415 if (xive->saved_src_count == xive->src_count) in xive_get_source()
1416 xive_post_save_scan(xive); in xive_get_source()
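
The save path above uses two counters as a bracket: the first xive_get_source() call triggers the pre-save scan, and once every registered source has been read out (saved_src_count == src_count) the post-save scan unmasks everything and resets the counter. Modeled in isolation, with hypothetical names:

static unsigned int src_count;          /* sources registered via xive_set_source() */
static unsigned int saved_src_count;    /* sources read out so far */

static void pre_save_scan(void)
{
        /* mask all sources, capture queue contents (lines 1284-1318) */
}

static void post_save_scan(void)
{
        saved_src_count = 0;            /* re-arm for the next save, line 1336 */
}

static void get_one_source(void)
{
        if (saved_src_count == 0)       /* first read starts the save */
                pre_save_scan();
        saved_src_count++;
        /* ... copy this source's state out to userspace ... */
        if (saved_src_count == src_count)       /* last read finishes it */
                post_save_scan();
}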
1425 static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive, in xive_create_src_block() argument
1428 struct kvm *kvm = xive->kvm; in xive_create_src_block()
1437 if (xive->src_blocks[bid]) in xive_create_src_block()
1454 xive->src_blocks[bid] = sb; in xive_create_src_block()
1456 if (bid > xive->max_sbid) in xive_create_src_block()
1457 xive->max_sbid = bid; in xive_create_src_block()
1461 return xive->src_blocks[bid]; in xive_create_src_block()
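
Source blocks are created lazily: xive_set_source() looks a block up first (line 1502 below) and only allocates on a miss (line 1505), while max_sbid records a high-water mark so the scan and teardown loops (lines 996, 1293, 1327, 1762) only walk slots that can be populated. A compact model of the pattern, with hypothetical names:

#include <stdlib.h>

#define NR_BLOCKS 1024                  /* illustrative bound */

struct src_block { int placeholder; };

static struct src_block *src_blocks[NR_BLOCKS];
static unsigned int max_sbid;           /* high-water mark for scan loops */

static struct src_block *get_or_create_block(unsigned int bid)
{
        if (src_blocks[bid])            /* already there, line 1437 */
                return src_blocks[bid];
        src_blocks[bid] = calloc(1, sizeof(struct src_block));
        if (src_blocks[bid] && bid > max_sbid)
                max_sbid = bid;         /* lines 1456-1457 */
        return src_blocks[bid];
}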
1464 static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq) in xive_check_delayed_irq() argument
1466 struct kvm *kvm = xive->kvm; in xive_check_delayed_irq()
1478 xive->delayed_irqs--; in xive_check_delayed_irq()
1485 static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr) in xive_set_source() argument
1502 sb = kvmppc_xive_find_source(xive, irq, &idx); in xive_set_source()
1505 sb = xive_create_src_block(xive, irq); in xive_set_source()
1547 xive_lock_and_mask(xive, sb, state); in xive_set_source()
1570 mutex_lock(&xive->kvm->lock); in xive_set_source()
1571 rc = xive_check_provisioning(xive->kvm, act_prio); in xive_set_source()
1572 mutex_unlock(&xive->kvm->lock); in xive_set_source()
1576 rc = xive_target_interrupt(xive->kvm, state, in xive_set_source()
1589 if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) { in xive_set_source()
1636 xive_finish_unmask(xive, sb, state, guest_prio); in xive_set_source()
1642 xive->src_count++; in xive_set_source()
1651 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_set_irq() local
1656 if (!xive) in kvmppc_xive_set_irq()
1659 sb = kvmppc_xive_find_source(xive, irq, &idx); in kvmppc_xive_set_irq()
1687 struct kvmppc_xive *xive = dev->private; in xive_set_attr() local
1692 return xive_set_source(xive, attr->attr, attr->addr); in xive_set_attr()
1699 struct kvmppc_xive *xive = dev->private; in xive_get_attr() local
1704 return xive_get_source(xive, attr->attr, attr->addr); in xive_get_attr()
1752 struct kvmppc_xive *xive = dev->private; in kvmppc_xive_free() local
1753 struct kvm *kvm = xive->kvm; in kvmppc_xive_free()
1756 debugfs_remove(xive->dentry); in kvmppc_xive_free()
1759 kvm->arch.xive = NULL; in kvmppc_xive_free()
1762 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_free()
1763 if (xive->src_blocks[i]) in kvmppc_xive_free()
1764 kvmppc_xive_free_sources(xive->src_blocks[i]); in kvmppc_xive_free()
1765 kfree(xive->src_blocks[i]); in kvmppc_xive_free()
1766 xive->src_blocks[i] = NULL; in kvmppc_xive_free()
1769 if (xive->vp_base != XIVE_INVALID_VP) in kvmppc_xive_free()
1770 xive_native_free_vp_block(xive->vp_base); in kvmppc_xive_free()
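
Teardown walks the same 0..max_sbid range, frees each block, and releases the VP block only if its allocation succeeded at create time. A sketch of that ordering, reusing the src_blocks/max_sbid model from the creation sketch above:

static void free_all_blocks(void)
{
        for (unsigned int i = 0; i <= max_sbid; i++) {
                free(src_blocks[i]);    /* kernel also frees each source's IRQ state */
                src_blocks[i] = NULL;
        }
        /* the VP block is released only if it was ever allocated
         * (lines 1769-1770) */
}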
1773 kfree(xive); in kvmppc_xive_free()
1779 struct kvmppc_xive *xive; in kvmppc_xive_create() local
1785 xive = kzalloc(sizeof(*xive), GFP_KERNEL); in kvmppc_xive_create()
1786 if (!xive) in kvmppc_xive_create()
1789 dev->private = xive; in kvmppc_xive_create()
1790 xive->dev = dev; in kvmppc_xive_create()
1791 xive->kvm = kvm; in kvmppc_xive_create()
1794 if (kvm->arch.xive) in kvmppc_xive_create()
1797 kvm->arch.xive = xive; in kvmppc_xive_create()
1800 xive->q_order = xive_native_default_eq_shift(); in kvmppc_xive_create()
1801 if (xive->q_order < PAGE_SHIFT) in kvmppc_xive_create()
1802 xive->q_page_order = 0; in kvmppc_xive_create()
1804 xive->q_page_order = xive->q_order - PAGE_SHIFT; in kvmppc_xive_create()
1807 xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS); in kvmppc_xive_create()
1808 pr_devel("VP_Base=%x\n", xive->vp_base); in kvmppc_xive_create()
1810 if (xive->vp_base == XIVE_INVALID_VP) in kvmppc_xive_create()
1813 xive->single_escalation = xive_native_has_single_escalation(); in kvmppc_xive_create()
1816 kfree(xive); in kvmppc_xive_create()
1826 struct kvmppc_xive *xive = m->private; in xive_debug_show() local
1827 struct kvm *kvm = xive->kvm; in xive_debug_show()
1920 static void xive_debugfs_init(struct kvmppc_xive *xive) in xive_debugfs_init() argument
1924 name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive); in xive_debugfs_init()
1930 xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root, in xive_debugfs_init()
1931 xive, &xive_debug_fops); in xive_debugfs_init()
1939 struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private; in kvmppc_xive_init() local
1942 xive_debugfs_init(xive); in kvmppc_xive_init()