Lines matching refs:matrix_mdev

Each line below is one reference to matrix_mdev in the vfio_ap mediated-device (mdev) driver: the leading number is the source line, the trailing "in <function>()" names the enclosing function, and "argument" or "local" marks the lines where matrix_mdev is declared as a parameter or local variable.
27 static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
50 struct ap_matrix_mdev *matrix_mdev, in vfio_ap_get_queue() argument
55 if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm)) in vfio_ap_get_queue()
57 if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) in vfio_ap_get_queue()
62 q->matrix_mdev = matrix_mdev; in vfio_ap_get_queue()
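
Lines 50-62 index vfio_ap_get_queue(), which resolves an APQN to the driver's queue object only if both the adapter bit (APM) and the domain bit (AQM) are set in the mdev's matrix. A minimal sketch reconstructed from those fragments; the vfio_ap_find_queue() lookup and the exact signature are assumptions based on the surrounding driver:

static struct vfio_ap_queue *vfio_ap_get_queue(struct ap_matrix_mdev *matrix_mdev,
                                               int apqn)
{
        struct vfio_ap_queue *q;

        /* The APQN must fall inside the mdev's adapter and domain masks. */
        if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm))
                return NULL;
        if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm))
                return NULL;

        /* Look up the queue on the AP bus and remember its owner (line 62). */
        q = vfio_ap_find_queue(apqn);
        if (q)
                q->matrix_mdev = matrix_mdev;

        return q;
}
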
120 !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) { in vfio_ap_free_aqic_resources()
121 kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc); in vfio_ap_free_aqic_resources()
124 if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) { in vfio_ap_free_aqic_resources()
125 vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), in vfio_ap_free_aqic_resources()
182 q->matrix_mdev = NULL; in vfio_ap_irq_disable()
215 ret = vfio_pin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1, in vfio_ap_irq_enable()
225 kvm = q->matrix_mdev->kvm; in vfio_ap_irq_enable()
244 vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1); in vfio_ap_irq_enable()
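
Lines 120-244 cover the interrupt plumbing: vfio_ap_irq_enable() pins the guest's notification-indicator page (line 215), registers the guest ISC with the KVM GISA, and unpins the page again on failure (line 244); vfio_ap_irq_disable() drops the queue's back-pointer (line 182). The teardown helper they share can be reconstructed from lines 120-125; the saved_isc/saved_pfn fields appear in the fragments, while VFIO_AP_ISC_INVALID as the "nothing registered" marker is an assumption:

static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
        if (!q)
                return;

        /* Unregister the guest ISC that was registered at AQIC time. */
        if (q->saved_isc != VFIO_AP_ISC_INVALID &&
            !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) {
                kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
                q->saved_isc = VFIO_AP_ISC_INVALID;
        }

        /* Unpin the notification page pinned by vfio_ap_irq_enable(). */
        if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) {
                vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev),
                                 &q->saved_pfn, 1);
                q->saved_pfn = 0;
        }
}
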
285 struct ap_matrix_mdev *matrix_mdev; in handle_pqap() local
296 matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook, in handle_pqap()
300 if (!matrix_mdev->kvm) in handle_pqap()
303 q = vfio_ap_get_queue(matrix_mdev, apqn); in handle_pqap()
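
Lines 285-303 index handle_pqap(), the PQAP(AQIC) intercept handler that KVM reaches through kvm->arch.crypto.pqap_hook. A condensed sketch under the assumption that the APQN arrives in guest gpr 0 and the AQIC status word and notification-indicator address in gprs 1 and 2, as in the mainline driver; facility checks present in the real handler are omitted:

static int handle_pqap(struct kvm_vcpu *vcpu)
{
        struct ap_queue_status qstatus = {
                .response_code = AP_RESPONSE_Q_NOT_AVAIL };
        struct ap_matrix_mdev *matrix_mdev;
        struct vfio_ap_queue *q;
        uint64_t status;
        uint16_t apqn;

        apqn = vcpu->run->s.regs.gprs[0] & 0xffff;
        mutex_lock(&matrix_dev->lock);

        if (!vcpu->kvm->arch.crypto.pqap_hook)
                goto out_unlock;
        /* The hook lives inside the mdev state, so recover it (line 296). */
        matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
                                   struct ap_matrix_mdev, pqap_hook);
        if (!matrix_mdev->kvm)          /* no guest using the mdev (line 300) */
                goto out_unlock;

        q = vfio_ap_get_queue(matrix_mdev, apqn);
        if (!q)
                goto out_unlock;

        /* Bit 16 of the status word selects interrupt enable vs. disable. */
        status = vcpu->run->s.regs.gprs[1];
        if ((status >> (63 - 16)) & 0x01)
                qstatus = vfio_ap_irq_enable(q, status & 0x07,
                                             vcpu->run->s.regs.gprs[2]);
        else
                qstatus = vfio_ap_irq_disable(q);

out_unlock:
        memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
        vcpu->run->s.regs.gprs[1] >>= 32;
        mutex_unlock(&matrix_dev->lock);
        return 0;
}
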
333 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_probe() local
339 matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL); in vfio_ap_mdev_probe()
340 if (!matrix_mdev) { in vfio_ap_mdev_probe()
344 vfio_init_group_dev(&matrix_mdev->vdev, &mdev->dev, in vfio_ap_mdev_probe()
347 matrix_mdev->mdev = mdev; in vfio_ap_mdev_probe()
348 vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix); in vfio_ap_mdev_probe()
349 matrix_mdev->pqap_hook = handle_pqap; in vfio_ap_mdev_probe()
351 list_add(&matrix_mdev->node, &matrix_dev->mdev_list); in vfio_ap_mdev_probe()
354 ret = vfio_register_group_dev(&matrix_mdev->vdev); in vfio_ap_mdev_probe()
357 dev_set_drvdata(&mdev->dev, matrix_mdev); in vfio_ap_mdev_probe()
362 list_del(&matrix_mdev->node); in vfio_ap_mdev_probe()
364 vfio_uninit_group_dev(&matrix_mdev->vdev); in vfio_ap_mdev_probe()
365 kfree(matrix_mdev); in vfio_ap_mdev_probe()
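
Lines 333-365 index vfio_ap_mdev_probe(), which allocates the per-mdev state, initializes it as a vfio group device, puts it on the matrix device's mdev list, registers it, and unwinds in reverse on error (lines 362-365). A sketch assembled from those fragments; the real probe also does instance-count bookkeeping that is omitted here, and the vfio_ap_matrix_dev_ops name is an assumption:

static int vfio_ap_mdev_probe(struct mdev_device *mdev)
{
        struct ap_matrix_mdev *matrix_mdev;
        int ret;

        matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL);
        if (!matrix_mdev)
                return -ENOMEM;

        vfio_init_group_dev(&matrix_mdev->vdev, &mdev->dev,
                            &vfio_ap_matrix_dev_ops);
        matrix_mdev->mdev = mdev;
        vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
        matrix_mdev->pqap_hook = handle_pqap;

        mutex_lock(&matrix_dev->lock);
        list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
        mutex_unlock(&matrix_dev->lock);

        ret = vfio_register_group_dev(&matrix_mdev->vdev);
        if (ret)
                goto err_list;
        dev_set_drvdata(&mdev->dev, matrix_mdev);
        return 0;

err_list:
        mutex_lock(&matrix_dev->lock);
        list_del(&matrix_mdev->node);
        mutex_unlock(&matrix_dev->lock);
        vfio_uninit_group_dev(&matrix_mdev->vdev);
        kfree(matrix_mdev);
        return ret;
}
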
373 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev); in vfio_ap_mdev_remove() local
375 vfio_unregister_group_dev(&matrix_mdev->vdev); in vfio_ap_mdev_remove()
378 vfio_ap_mdev_reset_queues(matrix_mdev); in vfio_ap_mdev_remove()
379 list_del(&matrix_mdev->node); in vfio_ap_mdev_remove()
381 vfio_uninit_group_dev(&matrix_mdev->vdev); in vfio_ap_mdev_remove()
382 kfree(matrix_mdev); in vfio_ap_mdev_remove()
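
The matching remove path (lines 373-382) unregisters the vfio device, resets every assigned queue, unlinks the mdev, and frees the state; a short sketch, with the locking around the reset and list removal assumed to be matrix_dev->lock:

static void vfio_ap_mdev_remove(struct mdev_device *mdev)
{
        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);

        vfio_unregister_group_dev(&matrix_mdev->vdev);

        mutex_lock(&matrix_dev->lock);
        vfio_ap_mdev_reset_queues(matrix_mdev);
        list_del(&matrix_mdev->node);
        mutex_unlock(&matrix_dev->lock);

        vfio_uninit_group_dev(&matrix_mdev->vdev);
        kfree(matrix_mdev);
}
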
526 vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_verify_queues_reserved_for_apid() argument
531 unsigned long nbits = matrix_mdev->matrix.aqm_max + 1; in vfio_ap_mdev_verify_queues_reserved_for_apid()
533 if (find_first_bit_inv(matrix_mdev->matrix.aqm, nbits) >= nbits) in vfio_ap_mdev_verify_queues_reserved_for_apid()
536 for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, nbits) { in vfio_ap_mdev_verify_queues_reserved_for_apid()
556 static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev) in vfio_ap_mdev_verify_no_sharing() argument
563 if (matrix_mdev == lstdev) in vfio_ap_mdev_verify_no_sharing()
573 if (!bitmap_and(apm, matrix_mdev->matrix.apm, in vfio_ap_mdev_verify_no_sharing()
577 if (!bitmap_and(aqm, matrix_mdev->matrix.aqm, in vfio_ap_mdev_verify_no_sharing()
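
Lines 556-577 index vfio_ap_mdev_verify_no_sharing(), which rejects an assignment if any APQN implied by this mdev's masks is also implied by another mdev's masks: an APQN is shared only when both the adapter and the domain masks intersect. Reconstructed from the fragments; AP_DEVICES/AP_DOMAINS as the bitmap sizes are assumptions:

static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
{
        struct ap_matrix_mdev *lstdev;
        DECLARE_BITMAP(apm, AP_DEVICES);
        DECLARE_BITMAP(aqm, AP_DOMAINS);

        list_for_each_entry(lstdev, &matrix_dev->mdev_list, node) {
                if (matrix_mdev == lstdev)
                        continue;

                memset(apm, 0, sizeof(apm));
                memset(aqm, 0, sizeof(aqm));

                /* No common adapter or no common domain means no common APQN. */
                if (!bitmap_and(apm, matrix_mdev->matrix.apm,
                                lstdev->matrix.apm, AP_DEVICES))
                        continue;
                if (!bitmap_and(aqm, matrix_mdev->matrix.aqm,
                                lstdev->matrix.aqm, AP_DOMAINS))
                        continue;

                return -EADDRINUSE;
        }

        return 0;
}
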
623 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in assign_adapter_store() local
628 if (matrix_mdev->kvm) { in assign_adapter_store()
637 if (apid > matrix_mdev->matrix.apm_max) { in assign_adapter_store()
647 ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid); in assign_adapter_store()
651 set_bit_inv(apid, matrix_mdev->matrix.apm); in assign_adapter_store()
653 ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev); in assign_adapter_store()
661 clear_bit_inv(apid, matrix_mdev->matrix.apm); in assign_adapter_store()
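
Lines 623-661 index assign_adapter_store(), the sysfs handler that adds an adapter (APID) to the mdev's APM. The pattern is: refuse while a KVM guest is attached (line 628), parse and bounds-check against apm_max, verify the new queues are reserved for this driver, set the bit, then re-check for sharing and roll the bit back on failure (line 661). The domain and control-domain attributes indexed below (lines 690-957) follow the same shape. A condensed sketch, with the matrix_dev->lock placement assumed:

static ssize_t assign_adapter_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{
        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
        unsigned long apid;
        int ret;

        mutex_lock(&matrix_dev->lock);

        if (matrix_mdev->kvm) {         /* no changes while a guest runs */
                ret = -EBUSY;
                goto done;
        }

        ret = kstrtoul(buf, 0, &apid);
        if (ret)
                goto done;
        if (apid > matrix_mdev->matrix.apm_max) {
                ret = -ENODEV;
                goto done;
        }

        /* All queues for this APID must be reserved for the vfio_ap driver. */
        ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
        if (ret)
                goto done;

        set_bit_inv(apid, matrix_mdev->matrix.apm);

        /* Undo the assignment if it would share an APQN with another mdev. */
        ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
        if (ret) {
                clear_bit_inv(apid, matrix_mdev->matrix.apm);
                goto done;
        }

        ret = count;
done:
        mutex_unlock(&matrix_dev->lock);
        return ret;
}
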
690 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in unassign_adapter_store() local
695 if (matrix_mdev->kvm) { in unassign_adapter_store()
704 if (apid > matrix_mdev->matrix.apm_max) { in unassign_adapter_store()
709 clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm); in unassign_adapter_store()
718 vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_verify_queues_reserved_for_apqi() argument
723 unsigned long nbits = matrix_mdev->matrix.apm_max + 1; in vfio_ap_mdev_verify_queues_reserved_for_apqi()
725 if (find_first_bit_inv(matrix_mdev->matrix.apm, nbits) >= nbits) in vfio_ap_mdev_verify_queues_reserved_for_apqi()
728 for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, nbits) { in vfio_ap_mdev_verify_queues_reserved_for_apqi()
774 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in assign_domain_store() local
775 unsigned long max_apqi = matrix_mdev->matrix.aqm_max; in assign_domain_store()
780 if (matrix_mdev->kvm) { in assign_domain_store()
793 ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi); in assign_domain_store()
797 set_bit_inv(apqi, matrix_mdev->matrix.aqm); in assign_domain_store()
799 ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev); in assign_domain_store()
807 clear_bit_inv(apqi, matrix_mdev->matrix.aqm); in assign_domain_store()
837 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in unassign_domain_store() local
842 if (matrix_mdev->kvm) { in unassign_domain_store()
851 if (apqi > matrix_mdev->matrix.aqm_max) { in unassign_domain_store()
856 clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm); in unassign_domain_store()
886 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in assign_control_domain_store() local
891 if (matrix_mdev->kvm) { in assign_control_domain_store()
900 if (id > matrix_mdev->matrix.adm_max) { in assign_control_domain_store()
910 set_bit_inv(id, matrix_mdev->matrix.adm); in assign_control_domain_store()
938 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in unassign_control_domain_store() local
939 unsigned long max_domid = matrix_mdev->matrix.adm_max; in unassign_control_domain_store()
944 if (matrix_mdev->kvm) { in unassign_control_domain_store()
957 clear_bit_inv(domid, matrix_mdev->matrix.adm); in unassign_control_domain_store()
973 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in control_domains_show() local
974 unsigned long max_domid = matrix_mdev->matrix.adm_max; in control_domains_show()
977 for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) { in control_domains_show()
991 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in matrix_show() local
997 unsigned long napm_bits = matrix_mdev->matrix.apm_max + 1; in matrix_show()
998 unsigned long naqm_bits = matrix_mdev->matrix.aqm_max + 1; in matrix_show()
1002 apid1 = find_first_bit_inv(matrix_mdev->matrix.apm, napm_bits); in matrix_show()
1003 apqi1 = find_first_bit_inv(matrix_mdev->matrix.aqm, naqm_bits); in matrix_show()
1008 for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) { in matrix_show()
1009 for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, in matrix_show()
1018 for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) { in matrix_show()
1024 for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, naqm_bits) { in matrix_show()
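
Lines 991-1024 index matrix_show(), which prints the assigned APQNs as the cross product of the APM and AQM (one "xx.yyyy" per line) and falls back to adapter-only ("xx.") or domain-only (".yyyy") output when one of the two masks is still empty, which is what the find_first_bit_inv() probes at lines 1002-1003 decide. A trimmed sketch of the main branch only; the fallback branches and buffer-size handling are elided:

static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
        unsigned long napm_bits = matrix_mdev->matrix.apm_max + 1;
        unsigned long naqm_bits = matrix_mdev->matrix.aqm_max + 1;
        unsigned long apid, apqi;
        char *bufpos = buf;
        int nchars = 0, n;

        mutex_lock(&matrix_dev->lock);

        /* One line per assigned APQN: adapter.domain in hex. */
        for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
                for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, naqm_bits) {
                        n = sprintf(bufpos, "%02lx.%04lx\n", apid, apqi);
                        bufpos += n;
                        nchars += n;
                }
        }

        mutex_unlock(&matrix_dev->lock);
        return nchars;
}
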
1075 static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_set_kvm() argument
1082 kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook; in vfio_ap_mdev_set_kvm()
1089 if (m != matrix_mdev && m->kvm == kvm) { in vfio_ap_mdev_set_kvm()
1097 matrix_mdev->kvm = kvm; in vfio_ap_mdev_set_kvm()
1099 matrix_mdev->matrix.apm, in vfio_ap_mdev_set_kvm()
1100 matrix_mdev->matrix.aqm, in vfio_ap_mdev_set_kvm()
1101 matrix_mdev->matrix.adm); in vfio_ap_mdev_set_kvm()
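
Lines 1075-1101 index vfio_ap_mdev_set_kvm(), which wires the PQAP hook into the guest's crypto state, refuses a KVM instance that is already bound to another mdev (line 1089), and then pushes the APM/AQM/ADM into the guest's CRYCB via kvm_arch_crypto_set_masks(). A simplified sketch; the real function also takes a reference on the kvm and does additional checking and locking that is omitted here:

static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
                                struct kvm *kvm)
{
        struct ap_matrix_mdev *m;

        /* Let the PQAP intercept find its way back to this mdev. */
        kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;

        list_for_each_entry(m, &matrix_dev->mdev_list, node) {
                if (m != matrix_mdev && m->kvm == kvm)
                        return -EPERM;  /* kvm already bound to another mdev */
        }

        matrix_mdev->kvm = kvm;
        /* Publish the masks to the guest's CRYCB. */
        kvm_arch_crypto_set_masks(kvm,
                                  matrix_mdev->matrix.apm,
                                  matrix_mdev->matrix.aqm,
                                  matrix_mdev->matrix.adm);
        return 0;
}
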
1125 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_iommu_notifier() local
1127 matrix_mdev = container_of(nb, struct ap_matrix_mdev, iommu_notifier); in vfio_ap_mdev_iommu_notifier()
1133 vfio_unpin_pages(mdev_dev(matrix_mdev->mdev), &g_pfn, 1); in vfio_ap_mdev_iommu_notifier()
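
Lines 1125-1133 index vfio_ap_mdev_iommu_notifier(), which reacts to VFIO DMA-unmap events by unpinning the single page the driver may have pinned for interrupt notification. A sketch assuming the notifier payload is a struct vfio_iommu_type1_dma_unmap:

static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
                                       unsigned long action, void *data)
{
        struct ap_matrix_mdev *matrix_mdev;

        matrix_mdev = container_of(nb, struct ap_matrix_mdev, iommu_notifier);

        if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
                struct vfio_iommu_type1_dma_unmap *unmap = data;
                unsigned long g_pfn = unmap->iova >> PAGE_SHIFT;

                /* Drop the pin taken in vfio_ap_irq_enable(), if any. */
                vfio_unpin_pages(mdev_dev(matrix_mdev->mdev), &g_pfn, 1);
                return NOTIFY_OK;
        }

        return NOTIFY_DONE;
}
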
1153 static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_unset_kvm() argument
1165 vfio_ap_mdev_reset_queues(matrix_mdev); in vfio_ap_mdev_unset_kvm()
1167 matrix_mdev->kvm = NULL; in vfio_ap_mdev_unset_kvm()
1178 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_group_notifier() local
1183 matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier); in vfio_ap_mdev_group_notifier()
1186 vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm); in vfio_ap_mdev_group_notifier()
1187 else if (vfio_ap_mdev_set_kvm(matrix_mdev, data)) in vfio_ap_mdev_group_notifier()
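
Lines 1178-1187 index vfio_ap_mdev_group_notifier(), which handles VFIO_GROUP_NOTIFY_SET_KVM: a NULL payload detaches the guest through vfio_ap_mdev_unset_kvm() (which clears the guest masks, resets the queues, and drops matrix_mdev->kvm, per lines 1153-1167), a non-NULL payload attaches it through vfio_ap_mdev_set_kvm(). A sketch based on those fragments:

static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
                                       unsigned long action, void *data)
{
        struct ap_matrix_mdev *matrix_mdev;
        int notify_rc = NOTIFY_OK;

        if (action != VFIO_GROUP_NOTIFY_SET_KVM)
                return NOTIFY_OK;

        matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);

        if (!data)
                vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm);
        else if (vfio_ap_mdev_set_kvm(matrix_mdev, data))
                notify_rc = NOTIFY_DONE;

        return notify_rc;
}
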
1259 static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev) in vfio_ap_mdev_reset_queues() argument
1266 for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, in vfio_ap_mdev_reset_queues()
1267 matrix_mdev->matrix.apm_max + 1) { in vfio_ap_mdev_reset_queues()
1268 for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, in vfio_ap_mdev_reset_queues()
1269 matrix_mdev->matrix.aqm_max + 1) { in vfio_ap_mdev_reset_queues()
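
Lines 1259-1269 index vfio_ap_mdev_reset_queues(), which walks the cross product of the APM and AQM and resets every assigned queue, continuing past individual failures so every queue still gets a reset attempt. A sketch; vfio_ap_find_queue() and the per-queue vfio_ap_mdev_reset_queue() helper are assumptions based on the rest of the listing:

static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev)
{
        unsigned long apid, apqi;
        struct vfio_ap_queue *q;
        int ret, rc = 0;

        for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
                             matrix_mdev->matrix.apm_max + 1) {
                for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
                                     matrix_mdev->matrix.aqm_max + 1) {
                        q = vfio_ap_find_queue(AP_MKQID(apid, apqi));
                        ret = vfio_ap_mdev_reset_queue(q, 1);
                        /* Keep going so the remaining queues are still reset. */
                        if (ret)
                                rc = ret;
                }
        }

        return rc;
}
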
1287 struct ap_matrix_mdev *matrix_mdev = in vfio_ap_mdev_open_device() local
1292 matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier; in vfio_ap_mdev_open_device()
1296 &events, &matrix_mdev->group_notifier); in vfio_ap_mdev_open_device()
1300 matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier; in vfio_ap_mdev_open_device()
1303 &events, &matrix_mdev->iommu_notifier); in vfio_ap_mdev_open_device()
1310 &matrix_mdev->group_notifier); in vfio_ap_mdev_open_device()
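
Lines 1287-1310 index vfio_ap_mdev_open_device(), which installs the two notifiers: the group notifier for KVM attach/detach and the IOMMU notifier for DMA unmaps, unregistering the first if the second fails (line 1310). A sketch assuming the pre-5.19 vfio_register_notifier() form that takes a struct device:

static int vfio_ap_mdev_open_device(struct vfio_device *vdev)
{
        struct ap_matrix_mdev *matrix_mdev =
                container_of(vdev, struct ap_matrix_mdev, vdev);
        unsigned long events;
        int ret;

        matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier;
        events = VFIO_GROUP_NOTIFY_SET_KVM;
        ret = vfio_register_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
                                     &events, &matrix_mdev->group_notifier);
        if (ret)
                return ret;

        matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier;
        events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
        ret = vfio_register_notifier(vdev->dev, VFIO_IOMMU_NOTIFY,
                                     &events, &matrix_mdev->iommu_notifier);
        if (ret)
                vfio_unregister_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
                                         &matrix_mdev->group_notifier);
        return ret;
}
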
1316 struct ap_matrix_mdev *matrix_mdev = in vfio_ap_mdev_close_device() local
1320 &matrix_mdev->iommu_notifier); in vfio_ap_mdev_close_device()
1322 &matrix_mdev->group_notifier); in vfio_ap_mdev_close_device()
1323 vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm); in vfio_ap_mdev_close_device()
1349 struct ap_matrix_mdev *matrix_mdev = in vfio_ap_mdev_ioctl() local
1359 ret = vfio_ap_mdev_reset_queues(matrix_mdev); in vfio_ap_mdev_ioctl()
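
Lines 1349-1359 index vfio_ap_mdev_ioctl(), where VFIO_DEVICE_RESET is serviced by resetting every assigned queue. A minimal sketch; the VFIO_DEVICE_GET_INFO branch and its vfio_ap_mdev_get_device_info() helper are assumptions about the parts the listing does not show:

static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
                                  unsigned int cmd, unsigned long arg)
{
        struct ap_matrix_mdev *matrix_mdev =
                container_of(vdev, struct ap_matrix_mdev, vdev);
        int ret;

        mutex_lock(&matrix_dev->lock);
        switch (cmd) {
        case VFIO_DEVICE_GET_INFO:
                ret = vfio_ap_mdev_get_device_info(arg);
                break;
        case VFIO_DEVICE_RESET:
                ret = vfio_ap_mdev_reset_queues(matrix_mdev);
                break;
        default:
                ret = -EOPNOTSUPP;
                break;
        }
        mutex_unlock(&matrix_dev->lock);

        return ret;
}
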