Lines matching references to "imsic", grouped by the enclosing definition (source line numbers on the left):

In struct imsic (the per-VCPU IMSIC state, hanging off vcpu->arch.aia_context.imsic_state):
   33  struct imsic {
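Every field of the structure is exercised by the references below, so its likely shape can be reconstructed. A minimal sketch; only the field names and their uses come from the listing, while the exact types and the field order are assumptions:

    struct imsic {
            struct kvm_io_device iodev;     /* MMIO device on KVM_MMIO_BUS */
            u32 nr_msis;                    /* guest-visible MSI identities */
            u32 nr_eix;                     /* eip/eie pairs backing nr_msis */
            u32 nr_hw_eix;                  /* pairs provided by the host IMSIC */
            rwlock_t vsfile_lock;           /* protects the vsfile_* fields */
            int vsfile_cpu;                 /* host CPU owning the VS-file, -1 if none */
            int vsfile_hgei;                /* host guest-external-interrupt number */
            void __iomem *vsfile_va;        /* mapped VS-file MMIO window */
            phys_addr_t vsfile_pa;          /* VS-file physical address */
            struct imsic_mrif *swfile;      /* software IMSIC file (page-backed) */
            phys_addr_t swfile_pa;          /* physical address of the swfile */
    };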
In imsic_vsfile_cleanup():
  587  static void imsic_vsfile_cleanup(struct imsic *imsic)
  598  write_lock_irqsave(&imsic->vsfile_lock, flags);
  599  old_vsfile_hgei = imsic->vsfile_hgei;
  600  old_vsfile_cpu = imsic->vsfile_cpu;
  601  imsic->vsfile_cpu = imsic->vsfile_hgei = -1;
  602  imsic->vsfile_va = NULL;
  603  imsic->vsfile_pa = 0;
  604  write_unlock_irqrestore(&imsic->vsfile_lock, flags);
  606  memset(imsic->swfile, 0, sizeof(*imsic->swfile));
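Stitched together, the lines above form a compact detach sequence: publish "no VS-file" under the write lock so concurrent readers (such as the injection path) stop using the stale mapping, then reset the software file. The local declarations and what happens to the saved hgei/cpu pair are assumptions; the listing elides them:

    unsigned long flags;
    int old_vsfile_hgei, old_vsfile_cpu;

    write_lock_irqsave(&imsic->vsfile_lock, flags);
    old_vsfile_hgei = imsic->vsfile_hgei;
    old_vsfile_cpu = imsic->vsfile_cpu;
    imsic->vsfile_cpu = imsic->vsfile_hgei = -1;
    imsic->vsfile_va = NULL;
    imsic->vsfile_pa = 0;
    write_unlock_irqrestore(&imsic->vsfile_lock, flags);

    /* No reader can see the old VS-file anymore, so the software copy
     * can be reset; the saved hgei/cpu presumably let the caller free
     * the host resources (elided in the listing). */
    memset(imsic->swfile, 0, sizeof(*imsic->swfile));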
In imsic_swfile_extirq_update():
  614  struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
  615  struct imsic_mrif *mrif = imsic->swfile;
  618      imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis))
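Line 618 is the tail of a larger condition. A sketch of the decision it likely feeds, assuming the kvm_riscv_vcpu_set_interrupt()/kvm_riscv_vcpu_unset_interrupt() helpers and the IRQ_VS_EXT line, none of which appear in the listing:

    /* Raise the VS-level external interrupt when the software file has
     * a pending-and-enabled identity, otherwise lower it. */
    if (imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis))
            kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
    else
            kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);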
In imsic_swfile_read():
  627  struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
  636  memcpy(mrif, imsic->swfile, sizeof(*mrif));
  638  memset(imsic->swfile, 0, sizeof(*imsic->swfile));
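The copy at line 636 followed by the clear at line 638 suggests an optional destructive read. A sketch, assuming a bool clear parameter guards the memset:

    /* Snapshot the software file into the caller's buffer and, if
     * requested, start accumulating again from a clean slate. */
    memcpy(mrif, imsic->swfile, sizeof(*mrif));
    if (clear)
            memset(imsic->swfile, 0, sizeof(*imsic->swfile));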
In imsic_swfile_update():
  648  struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
  649  struct imsic_mrif *smrif = imsic->swfile;
  653  for (i = 0; i < imsic->nr_eix; i++) {
In kvm_riscv_vcpu_aia_imsic_release():
  672  struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
  675  write_lock_irqsave(&imsic->vsfile_lock, flags);
  676  old_vsfile_hgei = imsic->vsfile_hgei;
  677  old_vsfile_cpu = imsic->vsfile_cpu;
  678  imsic->vsfile_cpu = imsic->vsfile_hgei = -1;
  679  imsic->vsfile_va = NULL;
  680  imsic->vsfile_pa = 0;
  681  write_unlock_irqrestore(&imsic->vsfile_lock, flags);
  708  imsic_vsfile_read(old_vsfile_hgei, old_vsfile_cpu, imsic->nr_hw_eix,
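After detaching (the same write-locked sequence as in imsic_vsfile_cleanup() above), the release path drains the old VS-file back into the software file so no pending interrupts are lost. A sketch of that step; the "true" argument presumably asks imsic_vsfile_read() to clear as it reads, and the imsic_swfile_update() call is an assumption about the line following 708:

    struct imsic_mrif tmrif;

    imsic_vsfile_read(old_vsfile_hgei, old_vsfile_cpu, imsic->nr_hw_eix,
                      true, &tmrif);
    imsic_swfile_update(vcpu, &tmrif);      /* merge into the swfile */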
In kvm_riscv_vcpu_aia_imsic_update():
  727  struct imsic *imsic = vaia->imsic_state;
  735  read_lock_irqsave(&imsic->vsfile_lock, flags);
  736  old_vsfile_hgei = imsic->vsfile_hgei;
  737  old_vsfile_cpu = imsic->vsfile_cpu;
  738  read_unlock_irqrestore(&imsic->vsfile_lock, flags);
  773  imsic_vsfile_local_clear(new_vsfile_hgei, imsic->nr_hw_eix);
  785  write_lock_irqsave(&imsic->vsfile_lock, flags);
  786  imsic->vsfile_hgei = new_vsfile_hgei;
  787  imsic->vsfile_cpu = vcpu->cpu;
  788  imsic->vsfile_va = new_vsfile_va;
  789  imsic->vsfile_pa = new_vsfile_pa;
  790  write_unlock_irqrestore(&imsic->vsfile_lock, flags);
  802  imsic->nr_hw_eix, true, &tmrif);
  812  imsic_vsfile_local_update(new_vsfile_hgei, imsic->nr_hw_eix, &tmrif);
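Read in order, the lines above outline the VS-file switch-over performed when a vCPU moves to a new host CPU. A condensed sketch; variable names come from the listing, the declarations and the control flow between the listed lines are assumptions:

    /* 1. Snapshot the currently owned VS-file under the read lock. */
    read_lock_irqsave(&imsic->vsfile_lock, flags);
    old_vsfile_hgei = imsic->vsfile_hgei;
    old_vsfile_cpu = imsic->vsfile_cpu;
    read_unlock_irqrestore(&imsic->vsfile_lock, flags);

    /* 2. Scrub the newly allocated VS-file before exposing it. */
    imsic_vsfile_local_clear(new_vsfile_hgei, imsic->nr_hw_eix);

    /* 3. Publish the new VS-file; injectors now target it. */
    write_lock_irqsave(&imsic->vsfile_lock, flags);
    imsic->vsfile_hgei = new_vsfile_hgei;
    imsic->vsfile_cpu = vcpu->cpu;
    imsic->vsfile_va = new_vsfile_va;
    imsic->vsfile_pa = new_vsfile_pa;
    write_unlock_irqrestore(&imsic->vsfile_lock, flags);

    /* 4. Drain the old VS-file (line 802 is the tail of that call)
     *    and replay the accumulated state into the new one. */
    imsic_vsfile_local_update(new_vsfile_hgei, imsic->nr_hw_eix, &tmrif);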
In kvm_riscv_vcpu_aia_imsic_rmw():
  836  struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
  840  topei = imsic_mrif_topei(imsic->swfile, imsic->nr_eix,
  841                           imsic->nr_msis);
  849  eix = &imsic->swfile->eix[topei /
  856  r = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix, isel,
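Lines 840-849 handle the *topei register: a read returns the highest-priority pending-and-enabled identity, and a masked write claims it by clearing its pending bit; all other indirect registers funnel into the generic imsic_mrif_rmw() at line 856. A sketch of the claim step, assuming TOPEI_ID_SHIFT, the wr_mask guard, and the BITS_PER_TYPE(u64) divisor that line 849 truncates:

    topei = imsic_mrif_topei(imsic->swfile, imsic->nr_eix,
                             imsic->nr_msis);
    if (wr_mask) {                          /* wr_mask: assumption */
            topei >>= TOPEI_ID_SHIFT;       /* extract the identity */
            if (topei) {
                    eix = &imsic->swfile->eix[topei / BITS_PER_TYPE(u64)];
                    clear_bit(topei & (BITS_PER_TYPE(u64) - 1), eix->eip);
            }
    }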
In kvm_riscv_aia_imsic_rw_attr():
  874  struct imsic *imsic;
  887  imsic = vcpu->arch.aia_context.imsic_state;
  889  read_lock_irqsave(&imsic->vsfile_lock, flags);
  892  vsfile_hgei = imsic->vsfile_hgei;
  893  vsfile_cpu = imsic->vsfile_cpu;
  896  rc = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix,
  900  rc = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix,
  904  read_unlock_irqrestore(&imsic->vsfile_lock, flags);
  907  rc = imsic_vsfile_rw(vsfile_hgei, vsfile_cpu, imsic->nr_eix,
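The attribute accessor picks its backing store under the read lock: with no resident VS-file it does the read-modify-write directly on the software file (lines 896 and 900 are likely the read and write halves), otherwise it records the hgei/cpu pair and, after dropping the lock, forwards to imsic_vsfile_rw() on the owning CPU. A sketch; the branch structure and the trailing arguments of both calls are assumptions:

    read_lock_irqsave(&imsic->vsfile_lock, flags);
    if (imsic->vsfile_cpu < 0) {
            rc = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix,
                                isel, &val, new_val, wr_mask);
    } else {
            vsfile_hgei = imsic->vsfile_hgei;
            vsfile_cpu = imsic->vsfile_cpu;
    }
    read_unlock_irqrestore(&imsic->vsfile_lock, flags);

    if (vsfile_cpu >= 0)
            rc = imsic_vsfile_rw(vsfile_hgei, vsfile_cpu, imsic->nr_eix,
                                 isel, write, &val);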
In kvm_riscv_aia_imsic_has_attr():
  916  struct imsic *imsic;
  928  imsic = vcpu->arch.aia_context.imsic_state;
  929  return imsic_mrif_isel_check(imsic->nr_eix, isel);
In kvm_riscv_vcpu_aia_imsic_reset():
  934  struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
  936  if (!imsic)
  941  memset(imsic->swfile, 0, sizeof(*imsic->swfile));
In kvm_riscv_vcpu_aia_imsic_inject():
  949  struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
  952  if (!imsic || !iid || guest_index ||
  958  if (imsic->nr_msis <= iid)
  961  read_lock_irqsave(&imsic->vsfile_lock, flags);
  963  if (imsic->vsfile_cpu >= 0) {
  964  writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE);
  967  eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)];
  972  read_unlock_irqrestore(&imsic->vsfile_lock, flags);
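The injection path is why vsfile_lock is a rwlock: many producers can take it for reading concurrently. With a resident VS-file the MSI is delivered by a single little-endian MMIO write that the IMSIC latches itself; otherwise the pending bit is recorded in the software file. A sketch; the set_bit() call in the else branch is an assumption about the elided lines:

    read_lock_irqsave(&imsic->vsfile_lock, flags);
    if (imsic->vsfile_cpu >= 0) {
            /* Hardware delivery: one store, no further bookkeeping. */
            writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE);
    } else {
            /* Software delivery: mark the identity pending. */
            eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)];
            set_bit(iid & (BITS_PER_TYPE(u64) - 1), eix->eip);
    }
    read_unlock_irqrestore(&imsic->vsfile_lock, flags);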
In kvm_riscv_vcpu_aia_imsic_init():
 1012  struct imsic *imsic;
 1021  imsic = kzalloc(sizeof(*imsic), GFP_KERNEL);
 1022  if (!imsic)
 1024  vcpu->arch.aia_context.imsic_state = imsic;
 1027  imsic->nr_msis = kvm->arch.aia.nr_ids + 1;
 1028  rwlock_init(&imsic->vsfile_lock);
 1029  imsic->nr_eix = BITS_TO_U64(imsic->nr_msis);
 1030  imsic->nr_hw_eix = BITS_TO_U64(kvm_riscv_aia_max_ids);
 1031  imsic->vsfile_hgei = imsic->vsfile_cpu = -1;
 1035  get_order(sizeof(*imsic->swfile)));
 1040  imsic->swfile = page_to_virt(swfile_page);
 1041  imsic->swfile_pa = page_to_phys(swfile_page);
 1044  kvm_iodevice_init(&imsic->iodev, &imsic_iodoev_ops);
 1049  &imsic->iodev);
 1057  free_pages((unsigned long)imsic->swfile,
 1058  get_order(sizeof(*imsic->swfile)));
 1061  kfree(imsic);
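Assembled in order, the init lines form the usual allocate-then-register sequence, with lines 1057-1061 as the error unwind. A sketch with the gaps filled in; the alloc_pages() flags, the error label, and the return values are assumptions (imsic_iodoev_ops is spelled as it appears in the listing):

    imsic = kzalloc(sizeof(*imsic), GFP_KERNEL);
    if (!imsic)
            return -ENOMEM;
    vcpu->arch.aia_context.imsic_state = imsic;

    imsic->nr_msis = kvm->arch.aia.nr_ids + 1;      /* identities 0..nr_ids */
    rwlock_init(&imsic->vsfile_lock);
    imsic->nr_eix = BITS_TO_U64(imsic->nr_msis);
    imsic->nr_hw_eix = BITS_TO_U64(kvm_riscv_aia_max_ids);
    imsic->vsfile_hgei = imsic->vsfile_cpu = -1;

    /* The swfile may span several pages, hence get_order(). */
    swfile_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
                              get_order(sizeof(*imsic->swfile)));
    if (!swfile_page)
            goto fail_free_imsic;                   /* label: assumption */
    imsic->swfile = page_to_virt(swfile_page);
    imsic->swfile_pa = page_to_phys(swfile_page);

    kvm_iodevice_init(&imsic->iodev, &imsic_iodoev_ops);
    /* ... followed by kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ...,
     * &imsic->iodev); the guest-physical base address and length are
     * elided in the listing (line 1049 shows only the call's tail). */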
In kvm_riscv_vcpu_aia_imsic_cleanup():
 1068  struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
 1070  if (!imsic)
 1073  imsic_vsfile_cleanup(imsic);
 1076  kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &imsic->iodev);
 1079  free_pages((unsigned long)imsic->swfile,
 1080  get_order(sizeof(*imsic->swfile)));
 1083  kfree(imsic);
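Teardown mirrors init in reverse. A sketch of the whole function body; the slots_lock around the bus unregister and the NULLing of the vcpu pointer are assumptions not shown in the listing:

    if (!imsic)
            return;

    /* Detach any resident VS-file first so no CPU still targets it. */
    imsic_vsfile_cleanup(imsic);

    mutex_lock(&kvm->slots_lock);           /* locking: assumption */
    kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &imsic->iodev);
    mutex_unlock(&kvm->slots_lock);

    free_pages((unsigned long)imsic->swfile,
               get_order(sizeof(*imsic->swfile)));

    vcpu->arch.aia_context.imsic_state = NULL;  /* assumption */
    kfree(imsic);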