Lines Matching refs:kvm — cross-reference listing: each entry gives the source line number, the matching source line, and the enclosing function, with "argument" or "local" noting how kvm is used on that line.

262 static int sca_switch_to_extended(struct kvm *kvm);
295 struct kvm *kvm; in kvm_clock_sync() local
300 list_for_each_entry(kvm, &vm_list, vm_list) { in kvm_clock_sync()
301 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_clock_sync()
304 kvm->arch.epoch = vcpu->arch.sie_block->epoch; in kvm_clock_sync()
305 kvm->arch.epdx = vcpu->arch.sie_block->epdx; in kvm_clock_sync()
542 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) in kvm_vm_ioctl_check_extension() argument
584 if (hpage && !kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_check_extension()
663 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) in kvm_arch_sync_dirty_log() argument
668 struct gmap *gmap = kvm->arch.gmap; in kvm_arch_sync_dirty_log()
684 mark_page_dirty(kvm, cur_gfn + i); in kvm_arch_sync_dirty_log()
699 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, in kvm_vm_ioctl_get_dirty_log() argument
707 if (kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_get_dirty_log()
710 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
716 r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot); in kvm_vm_ioctl_get_dirty_log()
727 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
731 static void icpt_operexc_on_all_vcpus(struct kvm *kvm) in icpt_operexc_on_all_vcpus() argument
736 kvm_for_each_vcpu(i, vcpu, kvm) { in icpt_operexc_on_all_vcpus()
741 int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) in kvm_vm_ioctl_enable_cap() argument
750 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP"); in kvm_vm_ioctl_enable_cap()
751 kvm->arch.use_irqchip = 1; in kvm_vm_ioctl_enable_cap()
755 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP"); in kvm_vm_ioctl_enable_cap()
756 kvm->arch.user_sigp = 1; in kvm_vm_ioctl_enable_cap()
760 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
761 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
764 set_kvm_facility(kvm->arch.model.fac_mask, 129); in kvm_vm_ioctl_enable_cap()
765 set_kvm_facility(kvm->arch.model.fac_list, 129); in kvm_vm_ioctl_enable_cap()
767 set_kvm_facility(kvm->arch.model.fac_mask, 134); in kvm_vm_ioctl_enable_cap()
768 set_kvm_facility(kvm->arch.model.fac_list, 134); in kvm_vm_ioctl_enable_cap()
771 set_kvm_facility(kvm->arch.model.fac_mask, 135); in kvm_vm_ioctl_enable_cap()
772 set_kvm_facility(kvm->arch.model.fac_list, 135); in kvm_vm_ioctl_enable_cap()
775 set_kvm_facility(kvm->arch.model.fac_mask, 148); in kvm_vm_ioctl_enable_cap()
776 set_kvm_facility(kvm->arch.model.fac_list, 148); in kvm_vm_ioctl_enable_cap()
779 set_kvm_facility(kvm->arch.model.fac_mask, 152); in kvm_vm_ioctl_enable_cap()
780 set_kvm_facility(kvm->arch.model.fac_list, 152); in kvm_vm_ioctl_enable_cap()
783 set_kvm_facility(kvm->arch.model.fac_mask, 192); in kvm_vm_ioctl_enable_cap()
784 set_kvm_facility(kvm->arch.model.fac_list, 192); in kvm_vm_ioctl_enable_cap()
789 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
790 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s", in kvm_vm_ioctl_enable_cap()
795 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
796 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
799 set_kvm_facility(kvm->arch.model.fac_mask, 64); in kvm_vm_ioctl_enable_cap()
800 set_kvm_facility(kvm->arch.model.fac_list, 64); in kvm_vm_ioctl_enable_cap()
803 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
804 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s", in kvm_vm_ioctl_enable_cap()
808 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
809 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
812 set_kvm_facility(kvm->arch.model.fac_mask, 72); in kvm_vm_ioctl_enable_cap()
813 set_kvm_facility(kvm->arch.model.fac_list, 72); in kvm_vm_ioctl_enable_cap()
816 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
817 VM_EVENT(kvm, 3, "ENABLE: AIS %s", in kvm_vm_ioctl_enable_cap()
822 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
823 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
826 set_kvm_facility(kvm->arch.model.fac_mask, 133); in kvm_vm_ioctl_enable_cap()
827 set_kvm_facility(kvm->arch.model.fac_list, 133); in kvm_vm_ioctl_enable_cap()
830 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
831 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s", in kvm_vm_ioctl_enable_cap()
835 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
836 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
838 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_enable_cap()
842 mmap_write_lock(kvm->mm); in kvm_vm_ioctl_enable_cap()
843 kvm->mm->context.allow_gmap_hpage_1m = 1; in kvm_vm_ioctl_enable_cap()
844 mmap_write_unlock(kvm->mm); in kvm_vm_ioctl_enable_cap()
850 kvm->arch.use_skf = 0; in kvm_vm_ioctl_enable_cap()
851 kvm->arch.use_pfmfi = 0; in kvm_vm_ioctl_enable_cap()
853 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
854 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s", in kvm_vm_ioctl_enable_cap()
858 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI"); in kvm_vm_ioctl_enable_cap()
859 kvm->arch.user_stsi = 1; in kvm_vm_ioctl_enable_cap()
863 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0"); in kvm_vm_ioctl_enable_cap()
864 kvm->arch.user_instr0 = 1; in kvm_vm_ioctl_enable_cap()
865 icpt_operexc_on_all_vcpus(kvm); in kvm_vm_ioctl_enable_cap()
870 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
871 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
874 set_kvm_facility(kvm->arch.model.fac_mask, 11); in kvm_vm_ioctl_enable_cap()
875 set_kvm_facility(kvm->arch.model.fac_list, 11); in kvm_vm_ioctl_enable_cap()
878 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
879 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s", in kvm_vm_ioctl_enable_cap()
889 static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_mem_control() argument
896 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes", in kvm_s390_get_mem_control()
897 kvm->arch.mem_limit); in kvm_s390_get_mem_control()
898 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr)) in kvm_s390_get_mem_control()
908 static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_mem_control() argument
918 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support"); in kvm_s390_set_mem_control()
919 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
920 if (kvm->created_vcpus) in kvm_s390_set_mem_control()
922 else if (kvm->mm->context.allow_gmap_hpage_1m) in kvm_s390_set_mem_control()
925 kvm->arch.use_cmma = 1; in kvm_s390_set_mem_control()
927 kvm->arch.use_pfmfi = 0; in kvm_s390_set_mem_control()
930 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
937 if (!kvm->arch.use_cmma) in kvm_s390_set_mem_control()
940 VM_EVENT(kvm, 3, "%s", "RESET: CMMA states"); in kvm_s390_set_mem_control()
941 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
942 idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_mem_control()
943 s390_reset_cmma(kvm->arch.gmap->mm); in kvm_s390_set_mem_control()
944 srcu_read_unlock(&kvm->srcu, idx); in kvm_s390_set_mem_control()
945 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
951 if (kvm_is_ucontrol(kvm)) in kvm_s390_set_mem_control()
957 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT && in kvm_s390_set_mem_control()
958 new_limit > kvm->arch.mem_limit) in kvm_s390_set_mem_control()
969 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
970 if (!kvm->created_vcpus) { in kvm_s390_set_mem_control()
977 gmap_remove(kvm->arch.gmap); in kvm_s390_set_mem_control()
978 new->private = kvm; in kvm_s390_set_mem_control()
979 kvm->arch.gmap = new; in kvm_s390_set_mem_control()
983 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
984 VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit); in kvm_s390_set_mem_control()
985 VM_EVENT(kvm, 3, "New guest asce: 0x%pK", in kvm_s390_set_mem_control()
986 (void *) kvm->arch.gmap->asce); in kvm_s390_set_mem_control()
998 void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm) in kvm_s390_vcpu_crypto_reset_all() argument
1003 kvm_s390_vcpu_block_all(kvm); in kvm_s390_vcpu_crypto_reset_all()
1005 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_vcpu_crypto_reset_all()
1011 kvm_s390_vcpu_unblock_all(kvm); in kvm_s390_vcpu_crypto_reset_all()
1014 static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_set_crypto() argument
1016 mutex_lock(&kvm->lock); in kvm_s390_vm_set_crypto()
1019 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
1020 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1024 kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_vm_set_crypto()
1025 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1026 kvm->arch.crypto.aes_kw = 1; in kvm_s390_vm_set_crypto()
1027 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support"); in kvm_s390_vm_set_crypto()
1030 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
1031 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1035 kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_vm_set_crypto()
1036 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1037 kvm->arch.crypto.dea_kw = 1; in kvm_s390_vm_set_crypto()
1038 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support"); in kvm_s390_vm_set_crypto()
1041 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
1042 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1045 kvm->arch.crypto.aes_kw = 0; in kvm_s390_vm_set_crypto()
1046 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
1047 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1048 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support"); in kvm_s390_vm_set_crypto()
1051 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
1052 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1055 kvm->arch.crypto.dea_kw = 0; in kvm_s390_vm_set_crypto()
1056 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
1057 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1058 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support"); in kvm_s390_vm_set_crypto()
1062 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1065 kvm->arch.crypto.apie = 1; in kvm_s390_vm_set_crypto()
1069 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1072 kvm->arch.crypto.apie = 0; in kvm_s390_vm_set_crypto()
1075 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1079 kvm_s390_vcpu_crypto_reset_all(kvm); in kvm_s390_vm_set_crypto()
1080 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1087 if (!vcpu->kvm->arch.use_zpci_interp) in kvm_s390_vcpu_pci_setup()
1094 void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm) in kvm_s390_vcpu_pci_enable_interp() argument
1099 lockdep_assert_held(&kvm->lock); in kvm_s390_vcpu_pci_enable_interp()
1108 kvm->arch.use_zpci_interp = 1; in kvm_s390_vcpu_pci_enable_interp()
1110 kvm_s390_vcpu_block_all(kvm); in kvm_s390_vcpu_pci_enable_interp()
1112 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_vcpu_pci_enable_interp()
1117 kvm_s390_vcpu_unblock_all(kvm); in kvm_s390_vcpu_pci_enable_interp()
1120 static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req) in kvm_s390_sync_request_broadcast() argument
1125 kvm_for_each_vcpu(cx, vcpu, kvm) in kvm_s390_sync_request_broadcast()
1133 static int kvm_s390_vm_start_migration(struct kvm *kvm) in kvm_s390_vm_start_migration() argument
1141 if (kvm->arch.migration_mode) in kvm_s390_vm_start_migration()
1143 slots = kvm_memslots(kvm); in kvm_s390_vm_start_migration()
1147 if (!kvm->arch.use_cmma) { in kvm_s390_vm_start_migration()
1148 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1164 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages); in kvm_s390_vm_start_migration()
1165 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1166 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION); in kvm_s390_vm_start_migration()
1174 static int kvm_s390_vm_stop_migration(struct kvm *kvm) in kvm_s390_vm_stop_migration() argument
1177 if (!kvm->arch.migration_mode) in kvm_s390_vm_stop_migration()
1179 kvm->arch.migration_mode = 0; in kvm_s390_vm_stop_migration()
1180 if (kvm->arch.use_cmma) in kvm_s390_vm_stop_migration()
1181 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION); in kvm_s390_vm_stop_migration()
1185 static int kvm_s390_vm_set_migration(struct kvm *kvm, in kvm_s390_vm_set_migration() argument
1190 mutex_lock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1193 res = kvm_s390_vm_start_migration(kvm); in kvm_s390_vm_set_migration()
1196 res = kvm_s390_vm_stop_migration(kvm); in kvm_s390_vm_set_migration()
1201 mutex_unlock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1206 static int kvm_s390_vm_get_migration(struct kvm *kvm, in kvm_s390_vm_get_migration() argument
1209 u64 mig = kvm->arch.migration_mode; in kvm_s390_vm_get_migration()
1219 static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
1221 static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_ext() argument
1228 if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx) in kvm_s390_set_tod_ext()
1230 __kvm_s390_set_tod_clock(kvm, &gtod); in kvm_s390_set_tod_ext()
1232 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx", in kvm_s390_set_tod_ext()
1238 static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_high() argument
1248 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high); in kvm_s390_set_tod_high()
1253 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_low() argument
1261 __kvm_s390_set_tod_clock(kvm, &gtod); in kvm_s390_set_tod_low()
1262 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod); in kvm_s390_set_tod_low()
1266 static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod() argument
1273 mutex_lock(&kvm->lock); in kvm_s390_set_tod()
1278 if (kvm_s390_pv_is_protected(kvm)) { in kvm_s390_set_tod()
1285 ret = kvm_s390_set_tod_ext(kvm, attr); in kvm_s390_set_tod()
1288 ret = kvm_s390_set_tod_high(kvm, attr); in kvm_s390_set_tod()
1291 ret = kvm_s390_set_tod_low(kvm, attr); in kvm_s390_set_tod()
1299 mutex_unlock(&kvm->lock); in kvm_s390_set_tod()
1303 static void kvm_s390_get_tod_clock(struct kvm *kvm, in kvm_s390_get_tod_clock() argument
1312 gtod->tod = clk.tod + kvm->arch.epoch; in kvm_s390_get_tod_clock()
1314 if (test_kvm_facility(kvm, 139)) { in kvm_s390_get_tod_clock()
1315 gtod->epoch_idx = clk.ei + kvm->arch.epdx; in kvm_s390_get_tod_clock()
1323 static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_ext() argument
1328 kvm_s390_get_tod_clock(kvm, &gtod); in kvm_s390_get_tod_ext()
1332 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx", in kvm_s390_get_tod_ext()
1337 static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_high() argument
1344 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high); in kvm_s390_get_tod_high()
1349 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_low() argument
1353 gtod = kvm_s390_get_tod_clock_fast(kvm); in kvm_s390_get_tod_low()
1356 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod); in kvm_s390_get_tod_low()
1361 static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod() argument
1370 ret = kvm_s390_get_tod_ext(kvm, attr); in kvm_s390_get_tod()
1373 ret = kvm_s390_get_tod_high(kvm, attr); in kvm_s390_get_tod()
1376 ret = kvm_s390_get_tod_low(kvm, attr); in kvm_s390_get_tod()
1385 static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_processor() argument
1391 mutex_lock(&kvm->lock); in kvm_s390_set_processor()
1392 if (kvm->created_vcpus) { in kvm_s390_set_processor()
1403 kvm->arch.model.cpuid = proc->cpuid; in kvm_s390_set_processor()
1408 kvm->arch.model.ibc = unblocked_ibc; in kvm_s390_set_processor()
1410 kvm->arch.model.ibc = lowest_ibc; in kvm_s390_set_processor()
1412 kvm->arch.model.ibc = proc->ibc; in kvm_s390_set_processor()
1414 memcpy(kvm->arch.model.fac_list, proc->fac_list, in kvm_s390_set_processor()
1416 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx", in kvm_s390_set_processor()
1417 kvm->arch.model.ibc, in kvm_s390_set_processor()
1418 kvm->arch.model.cpuid); in kvm_s390_set_processor()
1419 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_set_processor()
1420 kvm->arch.model.fac_list[0], in kvm_s390_set_processor()
1421 kvm->arch.model.fac_list[1], in kvm_s390_set_processor()
1422 kvm->arch.model.fac_list[2]); in kvm_s390_set_processor()
1427 mutex_unlock(&kvm->lock); in kvm_s390_set_processor()
1431 static int kvm_s390_set_processor_feat(struct kvm *kvm, in kvm_s390_set_processor_feat() argument
1443 mutex_lock(&kvm->lock); in kvm_s390_set_processor_feat()
1444 if (kvm->created_vcpus) { in kvm_s390_set_processor_feat()
1445 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1448 bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS); in kvm_s390_set_processor_feat()
1449 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1450 VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", in kvm_s390_set_processor_feat()
1457 static int kvm_s390_set_processor_subfunc(struct kvm *kvm, in kvm_s390_set_processor_subfunc() argument
1460 mutex_lock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1461 if (kvm->created_vcpus) { in kvm_s390_set_processor_subfunc()
1462 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1466 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr, in kvm_s390_set_processor_subfunc()
1468 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1471 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1473 VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1474 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_set_processor_subfunc()
1475 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_set_processor_subfunc()
1476 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_set_processor_subfunc()
1477 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_set_processor_subfunc()
1478 VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1479 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_set_processor_subfunc()
1480 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_set_processor_subfunc()
1481 VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1482 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_set_processor_subfunc()
1483 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_set_processor_subfunc()
1484 VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1485 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_set_processor_subfunc()
1486 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_set_processor_subfunc()
1487 VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1488 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_set_processor_subfunc()
1489 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_set_processor_subfunc()
1490 VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1491 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_set_processor_subfunc()
1492 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_set_processor_subfunc()
1493 VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1494 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_set_processor_subfunc()
1495 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_set_processor_subfunc()
1496 VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1497 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_set_processor_subfunc()
1498 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_set_processor_subfunc()
1499 VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1500 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_set_processor_subfunc()
1501 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_set_processor_subfunc()
1502 VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1503 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_set_processor_subfunc()
1504 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_set_processor_subfunc()
1505 VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1506 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_set_processor_subfunc()
1507 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_set_processor_subfunc()
1508 VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1509 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_set_processor_subfunc()
1510 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_set_processor_subfunc()
1511 VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1512 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_set_processor_subfunc()
1513 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_set_processor_subfunc()
1514 VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1515 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_set_processor_subfunc()
1516 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_set_processor_subfunc()
1517 VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1518 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_set_processor_subfunc()
1519 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_set_processor_subfunc()
1520 VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1521 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_set_processor_subfunc()
1522 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_set_processor_subfunc()
1523 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_set_processor_subfunc()
1524 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_set_processor_subfunc()
1525 VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1526 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_set_processor_subfunc()
1527 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_set_processor_subfunc()
1528 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_set_processor_subfunc()
1529 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_set_processor_subfunc()
1543 static int kvm_s390_set_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_uv_feat() argument
1554 mutex_lock(&kvm->lock); in kvm_s390_set_uv_feat()
1555 if (kvm->created_vcpus) { in kvm_s390_set_uv_feat()
1556 mutex_unlock(&kvm->lock); in kvm_s390_set_uv_feat()
1559 kvm->arch.model.uv_feat_guest.feat = data; in kvm_s390_set_uv_feat()
1560 mutex_unlock(&kvm->lock); in kvm_s390_set_uv_feat()
1562 VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data); in kvm_s390_set_uv_feat()
1567 static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_cpu_model() argument
1573 ret = kvm_s390_set_processor(kvm, attr); in kvm_s390_set_cpu_model()
1576 ret = kvm_s390_set_processor_feat(kvm, attr); in kvm_s390_set_cpu_model()
1579 ret = kvm_s390_set_processor_subfunc(kvm, attr); in kvm_s390_set_cpu_model()
1582 ret = kvm_s390_set_uv_feat(kvm, attr); in kvm_s390_set_cpu_model()
1588 static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_processor() argument
1598 proc->cpuid = kvm->arch.model.cpuid; in kvm_s390_get_processor()
1599 proc->ibc = kvm->arch.model.ibc; in kvm_s390_get_processor()
1600 memcpy(&proc->fac_list, kvm->arch.model.fac_list, in kvm_s390_get_processor()
1602 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx", in kvm_s390_get_processor()
1603 kvm->arch.model.ibc, in kvm_s390_get_processor()
1604 kvm->arch.model.cpuid); in kvm_s390_get_processor()
1605 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_get_processor()
1606 kvm->arch.model.fac_list[0], in kvm_s390_get_processor()
1607 kvm->arch.model.fac_list[1], in kvm_s390_get_processor()
1608 kvm->arch.model.fac_list[2]); in kvm_s390_get_processor()
1616 static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_machine() argument
1628 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, in kvm_s390_get_machine()
1632 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx", in kvm_s390_get_machine()
1633 kvm->arch.model.ibc, in kvm_s390_get_machine()
1634 kvm->arch.model.cpuid); in kvm_s390_get_machine()
1635 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_get_machine()
1639 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_get_machine()
1650 static int kvm_s390_get_processor_feat(struct kvm *kvm, in kvm_s390_get_processor_feat() argument
1655 bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS); in kvm_s390_get_processor_feat()
1658 VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", in kvm_s390_get_processor_feat()
1665 static int kvm_s390_get_machine_feat(struct kvm *kvm, in kvm_s390_get_machine_feat() argument
1673 VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", in kvm_s390_get_machine_feat()
1680 static int kvm_s390_get_processor_subfunc(struct kvm *kvm, in kvm_s390_get_processor_subfunc() argument
1683 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs, in kvm_s390_get_processor_subfunc()
1687 VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1688 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_get_processor_subfunc()
1689 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_get_processor_subfunc()
1690 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_get_processor_subfunc()
1691 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_get_processor_subfunc()
1692 VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1693 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_get_processor_subfunc()
1694 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_get_processor_subfunc()
1695 VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1696 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_get_processor_subfunc()
1697 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_get_processor_subfunc()
1698 VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1699 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_get_processor_subfunc()
1700 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_get_processor_subfunc()
1701 VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1702 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_get_processor_subfunc()
1703 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_get_processor_subfunc()
1704 VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1705 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_get_processor_subfunc()
1706 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_get_processor_subfunc()
1707 VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1708 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_get_processor_subfunc()
1709 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_get_processor_subfunc()
1710 VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1711 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_get_processor_subfunc()
1712 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_get_processor_subfunc()
1713 VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1714 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_get_processor_subfunc()
1715 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_get_processor_subfunc()
1716 VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1717 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_get_processor_subfunc()
1718 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_get_processor_subfunc()
1719 VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1720 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_get_processor_subfunc()
1721 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_get_processor_subfunc()
1722 VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1723 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_get_processor_subfunc()
1724 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_get_processor_subfunc()
1725 VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1726 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_get_processor_subfunc()
1727 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_get_processor_subfunc()
1728 VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1729 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_get_processor_subfunc()
1730 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_get_processor_subfunc()
1731 VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1732 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_get_processor_subfunc()
1733 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_get_processor_subfunc()
1734 VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1735 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_get_processor_subfunc()
1736 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_get_processor_subfunc()
1737 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_get_processor_subfunc()
1738 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_get_processor_subfunc()
1739 VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1740 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_get_processor_subfunc()
1741 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_get_processor_subfunc()
1742 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_get_processor_subfunc()
1743 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_get_processor_subfunc()
1748 static int kvm_s390_get_machine_subfunc(struct kvm *kvm, in kvm_s390_get_machine_subfunc() argument
1755 VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1760 VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1763 VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1766 VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1769 VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1772 VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1775 VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1778 VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1781 VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1784 VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1787 VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1790 VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1793 VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1796 VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1799 VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1802 VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1807 VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1816 static int kvm_s390_get_processor_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_processor_uv_feat() argument
1819 unsigned long feat = kvm->arch.model.uv_feat_guest.feat; in kvm_s390_get_processor_uv_feat()
1823 VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat); in kvm_s390_get_processor_uv_feat()
1828 static int kvm_s390_get_machine_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_machine_uv_feat() argument
1838 VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat); in kvm_s390_get_machine_uv_feat()
1843 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_cpu_model() argument
1849 ret = kvm_s390_get_processor(kvm, attr); in kvm_s390_get_cpu_model()
1852 ret = kvm_s390_get_machine(kvm, attr); in kvm_s390_get_cpu_model()
1855 ret = kvm_s390_get_processor_feat(kvm, attr); in kvm_s390_get_cpu_model()
1858 ret = kvm_s390_get_machine_feat(kvm, attr); in kvm_s390_get_cpu_model()
1861 ret = kvm_s390_get_processor_subfunc(kvm, attr); in kvm_s390_get_cpu_model()
1864 ret = kvm_s390_get_machine_subfunc(kvm, attr); in kvm_s390_get_cpu_model()
1867 ret = kvm_s390_get_processor_uv_feat(kvm, attr); in kvm_s390_get_cpu_model()
1870 ret = kvm_s390_get_machine_uv_feat(kvm, attr); in kvm_s390_get_cpu_model()
1887 static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val) in kvm_s390_update_topology_change_report() argument
1892 read_lock(&kvm->arch.sca_lock); in kvm_s390_update_topology_change_report()
1893 sca = kvm->arch.sca; in kvm_s390_update_topology_change_report()
1899 read_unlock(&kvm->arch.sca_lock); in kvm_s390_update_topology_change_report()
1902 static int kvm_s390_set_topo_change_indication(struct kvm *kvm, in kvm_s390_set_topo_change_indication() argument
1905 if (!test_kvm_facility(kvm, 11)) in kvm_s390_set_topo_change_indication()
1908 kvm_s390_update_topology_change_report(kvm, !!attr->attr); in kvm_s390_set_topo_change_indication()
1912 static int kvm_s390_get_topo_change_indication(struct kvm *kvm, in kvm_s390_get_topo_change_indication() argument
1917 if (!test_kvm_facility(kvm, 11)) in kvm_s390_get_topo_change_indication()
1920 read_lock(&kvm->arch.sca_lock); in kvm_s390_get_topo_change_indication()
1921 topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr; in kvm_s390_get_topo_change_indication()
1922 read_unlock(&kvm->arch.sca_lock); in kvm_s390_get_topo_change_indication()
1927 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_set_attr() argument
1933 ret = kvm_s390_set_mem_control(kvm, attr); in kvm_s390_vm_set_attr()
1936 ret = kvm_s390_set_tod(kvm, attr); in kvm_s390_vm_set_attr()
1939 ret = kvm_s390_set_cpu_model(kvm, attr); in kvm_s390_vm_set_attr()
1942 ret = kvm_s390_vm_set_crypto(kvm, attr); in kvm_s390_vm_set_attr()
1945 ret = kvm_s390_vm_set_migration(kvm, attr); in kvm_s390_vm_set_attr()
1948 ret = kvm_s390_set_topo_change_indication(kvm, attr); in kvm_s390_vm_set_attr()
1958 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_get_attr() argument
1964 ret = kvm_s390_get_mem_control(kvm, attr); in kvm_s390_vm_get_attr()
1967 ret = kvm_s390_get_tod(kvm, attr); in kvm_s390_vm_get_attr()
1970 ret = kvm_s390_get_cpu_model(kvm, attr); in kvm_s390_vm_get_attr()
1973 ret = kvm_s390_vm_get_migration(kvm, attr); in kvm_s390_vm_get_attr()
1976 ret = kvm_s390_get_topo_change_indication(kvm, attr); in kvm_s390_vm_get_attr()
1986 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_has_attr() argument
2054 ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
2064 static int kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) in kvm_s390_get_skeys() argument
2086 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_skeys()
2088 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_get_skeys()
2098 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_skeys()
2112 static int kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) in kvm_s390_set_skeys() argument
2144 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_skeys()
2147 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_skeys()
2169 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_skeys()
2185 static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, in kvm_s390_peek_cmma() argument
2192 hva = gfn_to_hva(kvm, cur_gfn); in kvm_s390_peek_cmma()
2199 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_peek_cmma()
2242 static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, in kvm_s390_get_cmma() argument
2246 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_s390_get_cmma()
2253 ms = gfn_to_memslot(kvm, cur_gfn); in kvm_s390_get_cmma()
2262 hva = gfn_to_hva(kvm, cur_gfn); in kvm_s390_get_cmma()
2267 atomic64_dec(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma()
2268 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_get_cmma()
2285 ms = gfn_to_memslot(kvm, cur_gfn); in kvm_s390_get_cmma()
2301 static int kvm_s390_get_cmma_bits(struct kvm *kvm, in kvm_s390_get_cmma_bits() argument
2308 if (!kvm->arch.use_cmma) in kvm_s390_get_cmma_bits()
2315 if (!peek && !kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2319 if (!bufsize || !kvm->mm->context.uses_cmm) { in kvm_s390_get_cmma_bits()
2324 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) { in kvm_s390_get_cmma_bits()
2333 mmap_read_lock(kvm->mm); in kvm_s390_get_cmma_bits()
2334 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_cmma_bits()
2336 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize); in kvm_s390_get_cmma_bits()
2338 ret = kvm_s390_get_cmma(kvm, args, values, bufsize); in kvm_s390_get_cmma_bits()
2339 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_cmma_bits()
2340 mmap_read_unlock(kvm->mm); in kvm_s390_get_cmma_bits()
2342 if (kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2343 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma_bits()
2359 static int kvm_s390_set_cmma_bits(struct kvm *kvm, in kvm_s390_set_cmma_bits() argument
2368 if (!kvm->arch.use_cmma) in kvm_s390_set_cmma_bits()
2390 mmap_read_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2391 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_cmma_bits()
2393 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_cmma_bits()
2402 set_pgste_bits(kvm->mm, hva, mask, pgstev); in kvm_s390_set_cmma_bits()
2404 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_cmma_bits()
2405 mmap_read_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2407 if (!kvm->mm->context.uses_cmm) { in kvm_s390_set_cmma_bits()
2408 mmap_write_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2409 kvm->mm->context.uses_cmm = 1; in kvm_s390_set_cmma_bits()
2410 mmap_write_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2430 int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc) in kvm_s390_cpus_from_pv() argument
2445 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_cpus_from_pv()
2456 kvm_s390_gisa_enable(kvm); in kvm_s390_cpus_from_pv()
2471 static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc) in kvm_s390_cpus_to_pv() argument
2481 kvm_s390_gisa_disable(kvm); in kvm_s390_cpus_to_pv()
2483 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_cpus_to_pv()
2491 kvm_s390_cpus_from_pv(kvm, &dummy, &dummy); in kvm_s390_cpus_to_pv()
2542 static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd, in kvm_s390_pv_dmp() argument
2550 if (kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2557 kvm_s390_vcpu_block_all(kvm); in kvm_s390_pv_dmp()
2559 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_pv_dmp()
2561 KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x", in kvm_s390_pv_dmp()
2564 kvm->arch.pv.dumping = true; in kvm_s390_pv_dmp()
2566 kvm_s390_vcpu_unblock_all(kvm); in kvm_s390_pv_dmp()
2572 if (!kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2580 r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len, in kvm_s390_pv_dmp()
2585 if (!kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2592 r = kvm_s390_pv_dump_complete(kvm, result_buff, in kvm_s390_pv_dmp()
2604 static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd) in kvm_s390_handle_pv() argument
2612 mutex_lock(&kvm->lock); in kvm_s390_handle_pv()
2617 if (kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2624 r = sca_switch_to_extended(kvm); in kvm_s390_handle_pv()
2634 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2638 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2640 kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy); in kvm_s390_handle_pv()
2643 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2648 if (!kvm_s390_pv_is_protected(kvm) || !async_destroy) in kvm_s390_handle_pv()
2651 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2659 r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2662 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2669 r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2673 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2676 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2684 r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2687 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2695 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2715 r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length, in kvm_s390_handle_pv()
2725 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm)) in kvm_s390_handle_pv()
2732 r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak, in kvm_s390_handle_pv()
2738 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2741 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_handle_pv()
2743 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc, in kvm_s390_handle_pv()
2749 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2752 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_handle_pv()
2754 KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x", in kvm_s390_handle_pv()
2760 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2763 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_handle_pv()
2765 KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x", in kvm_s390_handle_pv()
2813 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2820 r = kvm_s390_pv_dmp(kvm, cmd, dmp); in kvm_s390_handle_pv()
2835 mutex_unlock(&kvm->lock); in kvm_s390_handle_pv()
2855 static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop) in kvm_s390_vm_mem_op_abs() argument
2873 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_vm_mem_op_abs()
2875 if (kvm_is_error_gpa(kvm, mop->gaddr)) { in kvm_s390_vm_mem_op_abs()
2882 r = check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key); in kvm_s390_vm_mem_op_abs()
2886 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf, in kvm_s390_vm_mem_op_abs()
2897 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf, in kvm_s390_vm_mem_op_abs()
2902 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_vm_mem_op_abs()
2908 static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *mop) in kvm_s390_vm_mem_op_cmpxchg() argument
2935 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_vm_mem_op_cmpxchg()
2937 if (kvm_is_error_gpa(kvm, mop->gaddr)) { in kvm_s390_vm_mem_op_cmpxchg()
2942 r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old.quad, in kvm_s390_vm_mem_op_cmpxchg()
2948 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_vm_mem_op_cmpxchg()
2952 static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop) in kvm_s390_vm_mem_op() argument
2963 if (kvm_s390_pv_get_handle(kvm)) in kvm_s390_vm_mem_op()
2969 return kvm_s390_vm_mem_op_abs(kvm, mop); in kvm_s390_vm_mem_op()
2971 return kvm_s390_vm_mem_op_cmpxchg(kvm, mop); in kvm_s390_vm_mem_op()
2979 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl() local
2991 r = kvm_s390_inject_vm(kvm, &s390int); in kvm_arch_vm_ioctl()
2998 if (kvm->arch.use_irqchip) { in kvm_arch_vm_ioctl()
3001 r = kvm_set_irq_routing(kvm, &routing, 0, 0); in kvm_arch_vm_ioctl()
3009 r = kvm_s390_vm_set_attr(kvm, &attr); in kvm_arch_vm_ioctl()
3016 r = kvm_s390_vm_get_attr(kvm, &attr); in kvm_arch_vm_ioctl()
3023 r = kvm_s390_vm_has_attr(kvm, &attr); in kvm_arch_vm_ioctl()
3033 r = kvm_s390_get_skeys(kvm, &args); in kvm_arch_vm_ioctl()
3043 r = kvm_s390_set_skeys(kvm, &args); in kvm_arch_vm_ioctl()
3052 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3053 r = kvm_s390_get_cmma_bits(kvm, &args); in kvm_arch_vm_ioctl()
3054 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3068 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3069 r = kvm_s390_set_cmma_bits(kvm, &args); in kvm_arch_vm_ioctl()
3070 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3077 kvm_s390_set_user_cpu_state_ctrl(kvm); in kvm_arch_vm_ioctl()
3092 r = kvm_s390_handle_pv(kvm, &args); in kvm_arch_vm_ioctl()
3103 r = kvm_s390_vm_mem_op(kvm, &mem_op); in kvm_arch_vm_ioctl()
3118 r = kvm_s390_pci_zpci_op(kvm, &args); in kvm_arch_vm_ioctl()
3148 static void kvm_s390_set_crycb_format(struct kvm *kvm) in kvm_s390_set_crycb_format() argument
3150 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; in kvm_s390_set_crycb_format()
3153 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK); in kvm_s390_set_crycb_format()
3156 if (!test_kvm_facility(kvm, 76)) in kvm_s390_set_crycb_format()
3160 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; in kvm_s390_set_crycb_format()
3162 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; in kvm_s390_set_crycb_format()
3180 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm, in kvm_arch_crypto_set_masks() argument
3183 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb; in kvm_arch_crypto_set_masks()
3185 kvm_s390_vcpu_block_all(kvm); in kvm_arch_crypto_set_masks()
3187 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { in kvm_arch_crypto_set_masks()
3190 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx", in kvm_arch_crypto_set_masks()
3193 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx", in kvm_arch_crypto_set_masks()
3196 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx", in kvm_arch_crypto_set_masks()
3204 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x", in kvm_arch_crypto_set_masks()
3213 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); in kvm_arch_crypto_set_masks()
3214 kvm_s390_vcpu_unblock_all(kvm); in kvm_arch_crypto_set_masks()
3230 void kvm_arch_crypto_clear_masks(struct kvm *kvm) in kvm_arch_crypto_clear_masks() argument
3232 kvm_s390_vcpu_block_all(kvm); in kvm_arch_crypto_clear_masks()
3234 memset(&kvm->arch.crypto.crycb->apcb0, 0, in kvm_arch_crypto_clear_masks()
3235 sizeof(kvm->arch.crypto.crycb->apcb0)); in kvm_arch_crypto_clear_masks()
3236 memset(&kvm->arch.crypto.crycb->apcb1, 0, in kvm_arch_crypto_clear_masks()
3237 sizeof(kvm->arch.crypto.crycb->apcb1)); in kvm_arch_crypto_clear_masks()
3239 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:"); in kvm_arch_crypto_clear_masks()
3241 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); in kvm_arch_crypto_clear_masks()
3242 kvm_s390_vcpu_unblock_all(kvm); in kvm_arch_crypto_clear_masks()
3255 static void kvm_s390_crypto_init(struct kvm *kvm) in kvm_s390_crypto_init() argument
3257 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; in kvm_s390_crypto_init()
3258 kvm_s390_set_crycb_format(kvm); in kvm_s390_crypto_init()
3259 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem); in kvm_s390_crypto_init()
3261 if (!test_kvm_facility(kvm, 76)) in kvm_s390_crypto_init()
3265 kvm->arch.crypto.aes_kw = 1; in kvm_s390_crypto_init()
3266 kvm->arch.crypto.dea_kw = 1; in kvm_s390_crypto_init()
3267 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_crypto_init()
3268 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_crypto_init()
3269 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_crypto_init()
3270 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_crypto_init()
3273 static void sca_dispose(struct kvm *kvm) in sca_dispose() argument
3275 if (kvm->arch.use_esca) in sca_dispose()
3276 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block)); in sca_dispose()
3278 free_page((unsigned long)(kvm->arch.sca)); in sca_dispose()
3279 kvm->arch.sca = NULL; in sca_dispose()
3282 void kvm_arch_free_vm(struct kvm *kvm) in kvm_arch_free_vm() argument
3285 kvm_s390_pci_clear_list(kvm); in kvm_arch_free_vm()
3287 __kvm_arch_free_vm(kvm); in kvm_arch_free_vm()
3290 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) in kvm_arch_init_vm() argument
3316 rwlock_init(&kvm->arch.sca_lock); in kvm_arch_init_vm()
3318 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); in kvm_arch_init_vm()
3319 if (!kvm->arch.sca) in kvm_arch_init_vm()
3325 kvm->arch.sca = (struct bsca_block *) in kvm_arch_init_vm()
3326 ((char *) kvm->arch.sca + sca_offset); in kvm_arch_init_vm()
3331 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); in kvm_arch_init_vm()
3332 if (!kvm->arch.dbf) in kvm_arch_init_vm()
3336 kvm->arch.sie_page2 = in kvm_arch_init_vm()
3338 if (!kvm->arch.sie_page2) in kvm_arch_init_vm()
3341 kvm->arch.sie_page2->kvm = kvm; in kvm_arch_init_vm()
3342 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; in kvm_arch_init_vm()
3345 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
3348 kvm->arch.model.fac_list[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
3351 kvm->arch.model.subfuncs = kvm_s390_available_subfunc; in kvm_arch_init_vm()
3354 set_kvm_facility(kvm->arch.model.fac_mask, 138); in kvm_arch_init_vm()
3355 set_kvm_facility(kvm->arch.model.fac_list, 138); in kvm_arch_init_vm()
3357 set_kvm_facility(kvm->arch.model.fac_mask, 74); in kvm_arch_init_vm()
3358 set_kvm_facility(kvm->arch.model.fac_list, 74); in kvm_arch_init_vm()
3360 set_kvm_facility(kvm->arch.model.fac_mask, 147); in kvm_arch_init_vm()
3361 set_kvm_facility(kvm->arch.model.fac_list, 147); in kvm_arch_init_vm()
3365 set_kvm_facility(kvm->arch.model.fac_mask, 65); in kvm_arch_init_vm()
3367 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); in kvm_arch_init_vm()
3368 kvm->arch.model.ibc = sclp.ibc & 0x0fff; in kvm_arch_init_vm()
3370 kvm->arch.model.uv_feat_guest.feat = 0; in kvm_arch_init_vm()
3372 kvm_s390_crypto_init(kvm); in kvm_arch_init_vm()
3375 mutex_lock(&kvm->lock); in kvm_arch_init_vm()
3376 kvm_s390_pci_init_list(kvm); in kvm_arch_init_vm()
3377 kvm_s390_vcpu_pci_enable_interp(kvm); in kvm_arch_init_vm()
3378 mutex_unlock(&kvm->lock); in kvm_arch_init_vm()
3381 mutex_init(&kvm->arch.float_int.ais_lock); in kvm_arch_init_vm()
3382 spin_lock_init(&kvm->arch.float_int.lock); in kvm_arch_init_vm()
3384 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); in kvm_arch_init_vm()
3385 init_waitqueue_head(&kvm->arch.ipte_wq); in kvm_arch_init_vm()
3386 mutex_init(&kvm->arch.ipte_mutex); in kvm_arch_init_vm()
3388 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); in kvm_arch_init_vm()
3389 VM_EVENT(kvm, 3, "vm created with type %lu", type); in kvm_arch_init_vm()
3392 kvm->arch.gmap = NULL; in kvm_arch_init_vm()
3393 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT; in kvm_arch_init_vm()
3396 kvm->arch.mem_limit = TASK_SIZE_MAX; in kvm_arch_init_vm()
3398 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX, in kvm_arch_init_vm()
3400 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1); in kvm_arch_init_vm()
3401 if (!kvm->arch.gmap) in kvm_arch_init_vm()
3403 kvm->arch.gmap->private = kvm; in kvm_arch_init_vm()
3404 kvm->arch.gmap->pfault_enabled = 0; in kvm_arch_init_vm()
3407 kvm->arch.use_pfmfi = sclp.has_pfmfi; in kvm_arch_init_vm()
3408 kvm->arch.use_skf = sclp.has_skey; in kvm_arch_init_vm()
3409 spin_lock_init(&kvm->arch.start_stop_lock); in kvm_arch_init_vm()
3410 kvm_s390_vsie_init(kvm); in kvm_arch_init_vm()
3412 kvm_s390_gisa_init(kvm); in kvm_arch_init_vm()
3413 INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup); in kvm_arch_init_vm()
3414 kvm->arch.pv.set_aside = NULL; in kvm_arch_init_vm()
3415 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid); in kvm_arch_init_vm()
3419 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_init_vm()
3420 debug_unregister(kvm->arch.dbf); in kvm_arch_init_vm()
3421 sca_dispose(kvm); in kvm_arch_init_vm()
3434 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
3436 kvm_s390_update_topology_change_report(vcpu->kvm, 1); in kvm_arch_vcpu_destroy()
3438 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
3441 if (vcpu->kvm->arch.use_cmma) in kvm_arch_vcpu_destroy()
3449 void kvm_arch_destroy_vm(struct kvm *kvm) in kvm_arch_destroy_vm() argument
3453 kvm_destroy_vcpus(kvm); in kvm_arch_destroy_vm()
3454 sca_dispose(kvm); in kvm_arch_destroy_vm()
3455 kvm_s390_gisa_destroy(kvm); in kvm_arch_destroy_vm()
3461 kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc); in kvm_arch_destroy_vm()
3468 if (kvm->arch.pv.mmu_notifier.ops) in kvm_arch_destroy_vm()
3469 mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm); in kvm_arch_destroy_vm()
3471 debug_unregister(kvm->arch.dbf); in kvm_arch_destroy_vm()
3472 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_destroy_vm()
3473 if (!kvm_is_ucontrol(kvm)) in kvm_arch_destroy_vm()
3474 gmap_remove(kvm->arch.gmap); in kvm_arch_destroy_vm()
3475 kvm_s390_destroy_adapters(kvm); in kvm_arch_destroy_vm()
3476 kvm_s390_clear_float_irqs(kvm); in kvm_arch_destroy_vm()
3477 kvm_s390_vsie_destroy(kvm); in kvm_arch_destroy_vm()
3478 KVM_EVENT(3, "vm 0x%pK destroyed", kvm); in kvm_arch_destroy_vm()
3487 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
3496 read_lock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
3497 if (vcpu->kvm->arch.use_esca) { in sca_del_vcpu()
3498 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
3503 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
3508 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
3514 phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca); in sca_add_vcpu()
3521 read_lock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
3522 if (vcpu->kvm->arch.use_esca) { in sca_add_vcpu()
3523 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
3532 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
3540 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
3561 static int sca_switch_to_extended(struct kvm *kvm) in sca_switch_to_extended() argument
3563 struct bsca_block *old_sca = kvm->arch.sca; in sca_switch_to_extended()
3570 if (kvm->arch.use_esca) in sca_switch_to_extended()
3581 kvm_s390_vcpu_block_all(kvm); in sca_switch_to_extended()
3582 write_lock(&kvm->arch.sca_lock); in sca_switch_to_extended()
3586 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) { in sca_switch_to_extended()
3591 kvm->arch.sca = new_sca; in sca_switch_to_extended()
3592 kvm->arch.use_esca = 1; in sca_switch_to_extended()
3594 write_unlock(&kvm->arch.sca_lock); in sca_switch_to_extended()
3595 kvm_s390_vcpu_unblock_all(kvm); in sca_switch_to_extended()
3599 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)", in sca_switch_to_extended()
3600 old_sca, kvm->arch.sca); in sca_switch_to_extended()
3604 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id) in sca_can_add_vcpu() argument
3618 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm); in sca_can_add_vcpu()
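sca_switch_to_extended() above follows a block-swap-unblock pattern: all vCPUs are blocked, arch.sca_lock is taken for writing, every vCPU is repointed from the basic SCA to the freshly allocated extended SCA, and only then is kvm->arch.sca republished and the vCPUs unblocked. sca_add_vcpu()/sca_del_vcpu() above only ever take the lock for reading, and sca_can_add_vcpu() triggers the switch lazily when a vCPU id no longer fits the basic block. The stand-alone model below mimics that reader/writer swap with pthreads; the slot counts (64 and 248) are meant to echo the basic/extended SCA limits, and everything else is illustrative.

/* Stand-alone model of the sca_switch_to_extended() pattern above:
 * readers use the table under a read lock, a writer swaps in a larger
 * table under the write lock.  pthread rwlocks stand in for the kernel
 * lock and vCPU blocking; all names are illustrative.  Build with -pthread.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sca_model {
	pthread_rwlock_t lock;
	int use_esca;
	int nr_slots;
	int *slots;		/* stands in for the (E)SCA CPU entries */
};

static void sca_read_slot(struct sca_model *m, int idx)
{
	pthread_rwlock_rdlock(&m->lock);
	if (idx < m->nr_slots)
		printf("slot %d = %d (esca=%d)\n", idx, m->slots[idx], m->use_esca);
	pthread_rwlock_unlock(&m->lock);
}

static int sca_switch_to_extended_model(struct sca_model *m, int new_slots)
{
	int *bigger = calloc(new_slots, sizeof(*bigger));

	if (!bigger)
		return -1;
	pthread_rwlock_wrlock(&m->lock);
	memcpy(bigger, m->slots, m->nr_slots * sizeof(*bigger));
	free(m->slots);
	m->slots = bigger;
	m->nr_slots = new_slots;
	m->use_esca = 1;
	pthread_rwlock_unlock(&m->lock);
	return 0;
}

int main(void)
{
	struct sca_model m = { .nr_slots = 64 };

	pthread_rwlock_init(&m.lock, NULL);
	m.slots = calloc(m.nr_slots, sizeof(*m.slots));
	if (!m.slots)
		return 1;
	sca_read_slot(&m, 0);
	if (sca_switch_to_extended_model(&m, 248))
		return 1;
	sca_read_slot(&m, 200);
	free(m.slots);
	return 0;
}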
3733 mutex_lock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3735 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
3736 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; in kvm_arch_vcpu_postcreate()
3738 mutex_unlock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3739 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_postcreate()
3740 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
3743 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0) in kvm_arch_vcpu_postcreate()
3749 static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr) in kvm_has_pckmo_subfunc() argument
3751 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) && in kvm_has_pckmo_subfunc()
3757 static bool kvm_has_pckmo_ecc(struct kvm *kvm) in kvm_has_pckmo_ecc() argument
3760 return kvm_has_pckmo_subfunc(kvm, 32) || in kvm_has_pckmo_ecc()
3761 kvm_has_pckmo_subfunc(kvm, 33) || in kvm_has_pckmo_ecc()
3762 kvm_has_pckmo_subfunc(kvm, 34) || in kvm_has_pckmo_ecc()
3763 kvm_has_pckmo_subfunc(kvm, 40) || in kvm_has_pckmo_ecc()
3764 kvm_has_pckmo_subfunc(kvm, 41); in kvm_has_pckmo_ecc()
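kvm_has_pckmo_subfunc() above tests one bit of the PCKMO query mask with test_bit_inv(), i.e. with the MSB-first bit numbering the s390 query blocks use, and kvm_has_pckmo_ecc() simply ORs the checks for the ECC-related function codes (32-34, 40 and 41 in the listing). The stand-alone sketch below shows the MSB-first bit test; the helper name and mask layout are illustrative, not the kernel's.

/* Stand-alone sketch of the MSB-first bit test that test_bit_inv() performs
 * in the listing above: bit 0 is the most significant bit of the first byte,
 * which is how the s390 query masks number their function codes.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool test_bit_msb0(unsigned int nr, const unsigned char *mask)
{
	return mask[nr / 8] & (0x80 >> (nr % 8));
}

int main(void)
{
	unsigned char pckmo_mask[16];

	memset(pckmo_mask, 0, sizeof(pckmo_mask));
	/* pretend the query reported function codes 32 and 40 */
	pckmo_mask[32 / 8] |= 0x80 >> (32 % 8);
	pckmo_mask[40 / 8] |= 0x80 >> (40 % 8);

	bool has_ecc = test_bit_msb0(32, pckmo_mask) ||
		       test_bit_msb0(33, pckmo_mask) ||
		       test_bit_msb0(34, pckmo_mask) ||
		       test_bit_msb0(40, pckmo_mask) ||
		       test_bit_msb0(41, pckmo_mask);
	printf("ecc pckmo available: %s\n", has_ecc ? "yes" : "no");
	return 0;
}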
3774 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76)) in kvm_s390_vcpu_crypto_setup()
3777 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
3782 if (vcpu->kvm->arch.crypto.apie) in kvm_s390_vcpu_crypto_setup()
3786 if (vcpu->kvm->arch.crypto.aes_kw) { in kvm_s390_vcpu_crypto_setup()
3789 if (kvm_has_pckmo_ecc(vcpu->kvm)) in kvm_s390_vcpu_crypto_setup()
3793 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
3816 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
3819 if (test_kvm_facility(vcpu->kvm, 7)) in kvm_s390_vcpu_setup_model()
3832 if (test_kvm_facility(vcpu->kvm, 78)) in kvm_s390_vcpu_setup()
3834 else if (test_kvm_facility(vcpu->kvm, 8)) in kvm_s390_vcpu_setup()
3842 if (test_kvm_facility(vcpu->kvm, 9)) in kvm_s390_vcpu_setup()
3844 if (test_kvm_facility(vcpu->kvm, 11)) in kvm_s390_vcpu_setup()
3846 if (test_kvm_facility(vcpu->kvm, 73)) in kvm_s390_vcpu_setup()
3848 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_s390_vcpu_setup()
3851 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi) in kvm_s390_vcpu_setup()
3853 if (test_kvm_facility(vcpu->kvm, 130)) in kvm_s390_vcpu_setup()
3864 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_s390_vcpu_setup()
3868 if (test_kvm_facility(vcpu->kvm, 139)) in kvm_s390_vcpu_setup()
3870 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_s390_vcpu_setup()
3885 if (vcpu->kvm->arch.use_cmma) { in kvm_s390_vcpu_setup()
3899 mutex_lock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3900 if (kvm_s390_pv_is_protected(vcpu->kvm)) { in kvm_s390_vcpu_setup()
3905 mutex_unlock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3910 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) in kvm_arch_vcpu_precreate() argument
3912 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id)) in kvm_arch_vcpu_precreate()
3936 vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm); in kvm_arch_vcpu_create()
3949 if (test_kvm_facility(vcpu->kvm, 64)) in kvm_arch_vcpu_create()
3951 if (test_kvm_facility(vcpu->kvm, 82)) in kvm_arch_vcpu_create()
3953 if (test_kvm_facility(vcpu->kvm, 133)) in kvm_arch_vcpu_create()
3955 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_arch_vcpu_create()
3965 if (kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_create()
3971 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", in kvm_arch_vcpu_create()
3979 kvm_s390_update_topology_change_report(vcpu->kvm, 1); in kvm_arch_vcpu_create()
3983 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_create()
3992 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in kvm_arch_vcpu_runnable()
4051 struct kvm *kvm = gmap->private; in kvm_gmap_notifier() local
4061 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_gmap_notifier()
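kvm_gmap_notifier() above is invoked with only the gmap at hand and recovers the owning VM through gmap->private, the back pointer installed in kvm_arch_init_vm() and __kvm_ucontrol_vcpu_init() above, before walking all vCPUs. The sketch below models that back-pointer arrangement; the structures and names are invented for the example.

/* Stand-alone sketch of the gmap->private back pointer used by
 * kvm_gmap_notifier() above: the address-space object carries a pointer
 * back to its owner so a callback can reach the VM without any global
 * lookup.  All structures and names here are illustrative.
 */
#include <stdio.h>

struct vm;

struct gmap_model {
	struct vm *private;	/* back pointer to the owning VM */
};

struct vm {
	struct gmap_model gmap;
	int nr_vcpus;
};

/* callback invoked with only the address-space object at hand */
static void gmap_notifier_model(struct gmap_model *gmap, unsigned long start,
				unsigned long end)
{
	struct vm *vm = gmap->private;
	int i;

	for (i = 0; i < vm->nr_vcpus; i++)
		printf("kick vcpu %d for range %#lx-%#lx\n", i, start, end);
}

int main(void)
{
	struct vm vm = { .nr_vcpus = 2 };

	vm.gmap.private = &vm;		/* mirrors kvm->arch.gmap->private = kvm */
	gmap_notifier_model(&vm.gmap, 0x1000, 0x2000);
	return 0;
}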
4199 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) in kvm_arch_vcpu_ioctl_normal_reset()
4431 kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm); in kvm_arch_vcpu_ioctl_set_mpstate()
4528 if ((vcpu->kvm->arch.use_cmma) && in kvm_s390_handle_requests()
4529 (vcpu->kvm->mm->context.uses_cmm)) in kvm_s390_handle_requests()
4540 static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) in __kvm_s390_set_tod_clock() argument
4550 kvm->arch.epoch = gtod->tod - clk.tod; in __kvm_s390_set_tod_clock()
4551 kvm->arch.epdx = 0; in __kvm_s390_set_tod_clock()
4552 if (test_kvm_facility(kvm, 139)) { in __kvm_s390_set_tod_clock()
4553 kvm->arch.epdx = gtod->epoch_idx - clk.ei; in __kvm_s390_set_tod_clock()
4554 if (kvm->arch.epoch > gtod->tod) in __kvm_s390_set_tod_clock()
4555 kvm->arch.epdx -= 1; in __kvm_s390_set_tod_clock()
4558 kvm_s390_vcpu_block_all(kvm); in __kvm_s390_set_tod_clock()
4559 kvm_for_each_vcpu(i, vcpu, kvm) { in __kvm_s390_set_tod_clock()
4560 vcpu->arch.sie_block->epoch = kvm->arch.epoch; in __kvm_s390_set_tod_clock()
4561 vcpu->arch.sie_block->epdx = kvm->arch.epdx; in __kvm_s390_set_tod_clock()
4564 kvm_s390_vcpu_unblock_all(kvm); in __kvm_s390_set_tod_clock()
4568 int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) in kvm_s390_try_set_tod_clock() argument
4570 if (!mutex_trylock(&kvm->lock)) in kvm_s390_try_set_tod_clock()
4572 __kvm_s390_set_tod_clock(kvm, gtod); in kvm_s390_try_set_tod_clock()
4573 mutex_unlock(&kvm->lock); in kvm_s390_try_set_tod_clock()
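__kvm_s390_set_tod_clock() above derives the guest epoch as guest TOD minus host TOD in wrapping 64-bit arithmetic and, with facility 139, also an epoch-index delta that loses one when the low-order subtraction wrapped (the epoch > gtod->tod test); kvm_s390_try_set_tod_clock() merely wraps this in mutex_trylock() so callers can back off instead of sleeping. The sketch below reproduces the borrow arithmetic stand-alone; the field widths and names are illustrative only.

/* Stand-alone sketch of the epoch arithmetic in __kvm_s390_set_tod_clock()
 * above: the guest epoch is guest_tod - host_tod modulo 2^64, and the
 * epoch-index difference takes an extra borrow when that subtraction
 * wrapped.  Field widths here are for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

struct tod_ext {
	uint8_t  epoch_idx;	/* high part (multiple-epoch facility) */
	uint64_t tod;		/* 64-bit TOD clock value */
};

static void compute_epoch(const struct tod_ext *guest,
			  const struct tod_ext *host,
			  uint8_t *epdx, uint64_t *epoch)
{
	*epoch = guest->tod - host->tod;		/* wraps modulo 2^64 */
	*epdx = guest->epoch_idx - host->epoch_idx;
	if (*epoch > guest->tod)			/* subtraction borrowed */
		*epdx -= 1;
}

int main(void)
{
	struct tod_ext host  = { .epoch_idx = 0, .tod = 0xfff0000000000000ULL };
	struct tod_ext guest = { .epoch_idx = 1, .tod = 0x0000000000001000ULL };
	uint64_t epoch;
	uint8_t epdx;

	compute_epoch(&guest, &host, &epdx, &epoch);
	printf("epoch=%#llx epdx=%u\n",
	       (unsigned long long)epoch, (unsigned)epdx);
	return 0;
}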
4606 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); in __kvm_inject_pfault_token()
4660 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); in kvm_arch_setup_async_pf()
4685 if (!kvm_is_ucontrol(vcpu->kvm)) { in vcpu_pre_run()
4700 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in vcpu_pre_run()
4783 } else if (kvm_is_ucontrol(vcpu->kvm)) { in vcpu_post_run()
4895 test_kvm_facility(vcpu->kvm, 64) && in sync_regs_fmt2()
4906 test_kvm_facility(vcpu->kvm, 133) && in sync_regs_fmt2()
4915 test_kvm_facility(vcpu->kvm, 82)) { in sync_regs_fmt2()
5047 if (vcpu->kvm->arch.pv.dumping) in kvm_arch_vcpu_ioctl_run()
5071 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
5190 static void __disable_ibs_on_all_vcpus(struct kvm *kvm) in __disable_ibs_on_all_vcpus() argument
5195 kvm_for_each_vcpu(i, vcpu, kvm) { in __disable_ibs_on_all_vcpus()
5217 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5218 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_start()
5224 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5230 if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i))) in kvm_s390_vcpu_start()
5243 __disable_ibs_on_all_vcpus(vcpu->kvm); in kvm_s390_vcpu_start()
5259 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5273 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5274 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_stop()
5280 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5297 struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i); in kvm_s390_vcpu_stop()
5313 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
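kvm_s390_vcpu_start() and kvm_s390_vcpu_stop() above serialise on arch.start_stop_lock, count how many vCPUs are currently started, and use that count to decide when the single-running-vCPU optimisation (IBS in the real code) is enabled for the last runner or dropped for everyone, e.g. via __disable_ibs_on_all_vcpus() above. The stand-alone sketch below shows only the counting decision under a plain mutex; the exact policy and all names are illustrative.

/* Stand-alone sketch of the start/stop bookkeeping in
 * kvm_s390_vcpu_start()/kvm_s390_vcpu_stop() above: a per-VM lock
 * serialises start/stop, and the number of started vCPUs decides whether
 * the "only one vCPU is running" mode is switched on or off.
 * Build with -pthread.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_VCPUS 4

struct vm_model {
	pthread_mutex_t start_stop_lock;
	bool started[NR_VCPUS];
};

static int count_started(struct vm_model *vm)
{
	int i, n = 0;

	for (i = 0; i < NR_VCPUS; i++)
		n += vm->started[i];
	return n;
}

static void vcpu_start(struct vm_model *vm, int id)
{
	pthread_mutex_lock(&vm->start_stop_lock);
	if (count_started(vm) == 1)
		printf("second vCPU starting: disable single-cpu mode on all\n");
	vm->started[id] = true;
	if (count_started(vm) == 1)
		printf("vCPU %d is the only one running: enable single-cpu mode\n", id);
	pthread_mutex_unlock(&vm->start_stop_lock);
}

static void vcpu_stop(struct vm_model *vm, int id)
{
	pthread_mutex_lock(&vm->start_stop_lock);
	vm->started[id] = false;
	if (count_started(vm) == 1)
		printf("one vCPU left running: enable single-cpu mode on it\n");
	pthread_mutex_unlock(&vm->start_stop_lock);
}

int main(void)
{
	struct vm_model vm = { .start_stop_lock = PTHREAD_MUTEX_INITIALIZER };

	vcpu_start(&vm, 0);
	vcpu_start(&vm, 1);
	vcpu_stop(&vm, 1);
	return 0;
}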
5327 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
5328 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
5329 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support"); in kvm_vcpu_ioctl_enable_cap()
5330 trace_kvm_s390_enable_css(vcpu->kvm); in kvm_vcpu_ioctl_enable_cap()
5434 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_vcpu_memsida_op()
5450 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvm_s390_vcpu_memsida_op()
5507 if (!vcpu->kvm->arch.pv.dumping) in kvm_s390_handle_pv_vcpu_dump()
5554 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5556 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5622 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
5639 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
5744 && (kvm_is_ucontrol(vcpu->kvm))) { in kvm_arch_vcpu_fault()
5753 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) in kvm_arch_irqchip_in_kernel() argument
5759 int kvm_arch_prepare_memory_region(struct kvm *kvm, in kvm_arch_prepare_memory_region() argument
5767 if (kvm_s390_pv_get_handle(kvm)) in kvm_arch_prepare_memory_region()
5785 if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit) in kvm_arch_prepare_memory_region()
5789 if (!kvm->arch.migration_mode) in kvm_arch_prepare_memory_region()
5802 WARN(kvm_s390_vm_stop_migration(kvm), in kvm_arch_prepare_memory_region()
5808 void kvm_arch_commit_memory_region(struct kvm *kvm, in kvm_arch_commit_memory_region() argument
5817 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5821 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5827 rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr, in kvm_arch_commit_memory_region()
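kvm_arch_prepare_memory_region() above rejects a slot whose guest-physical end would cross kvm->arch.mem_limit, the limit chosen back in kvm_arch_init_vm(); kvm_arch_commit_memory_region() then maps or unmaps the corresponding gmap segments. The sketch below shows just the limit check as stand-alone arithmetic; PAGE_SIZE and the field names here are illustrative.

/* Stand-alone sketch of the memory-slot limit check in
 * kvm_arch_prepare_memory_region() above: the slot's last guest-physical
 * byte must stay within the per-VM memory limit picked at VM creation.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

struct memslot_model {
	uint64_t base_gfn;	/* first guest frame number */
	uint64_t npages;	/* slot size in pages */
};

static bool slot_fits_mem_limit(const struct memslot_model *slot,
				uint64_t mem_limit)
{
	uint64_t size = slot->npages * PAGE_SIZE;

	return slot->base_gfn * PAGE_SIZE + size <= mem_limit;
}

int main(void)
{
	struct memslot_model ok  = { .base_gfn = 0,       .npages = 256 };
	struct memslot_model bad = { .base_gfn = 1 << 20, .npages = 256 };
	uint64_t limit = 1ULL << 31;	/* pretend mem_limit is 2 GiB */

	printf("ok fits:  %d\n", slot_fits_mem_limit(&ok, limit));
	printf("bad fits: %d\n", slot_fits_mem_limit(&bad, limit));
	return 0;
}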