Lines Matching +full:ipa +full:- +full:clock +full:- +full:query
1 // SPDX-License-Identifier: GPL-2.0
14 #define KMSG_COMPONENT "kvm-s390"
36 #include <asm/asm-offsets.h>
48 #include "kvm-s390.h"
53 #include "trace-s390.h"
201 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
222 /* available subfunctions indicated via query / "test bit" */
233 /* every s390 is virtualization enabled ;-) */ in kvm_arch_hardware_enable()
253 * -delta to the epoch. in kvm_clock_sync_scb()
255 delta = -delta; in kvm_clock_sync_scb()
257 /* sign-extension - we're adding to signed values below */ in kvm_clock_sync_scb()
259 delta_idx = -1; in kvm_clock_sync_scb()
261 scb->epoch += delta; in kvm_clock_sync_scb()
262 if (scb->ecd & ECD_MEF) { in kvm_clock_sync_scb()
263 scb->epdx += delta_idx; in kvm_clock_sync_scb()
264 if (scb->epoch < delta) in kvm_clock_sync_scb()
265 scb->epdx += 1; in kvm_clock_sync_scb()
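The carry handling above is a 128-bit signed add split across epoch (low word) and epdx (high word): delta_idx is the sign extension of delta, and one extra carry is added when the unsigned low-word addition wrapped. A stand-alone sketch of the same idiom (illustrative only, hypothetical names):

    #include <stdint.h>

    /* Sketch: 128-bit add of a sign-extended 64-bit delta.  After the
     * unsigned low-word add, wraparound occurred iff lo ended up below
     * delta -- the same test as "scb->epoch < delta" above. */
    static void add128_signed(uint64_t *hi, uint64_t *lo, int64_t delta)
    {
            uint64_t d = (uint64_t)delta;

            *lo += d;
            *hi += delta < 0 ? UINT64_MAX : 0;  /* sign extension: -1 or 0 */
            if (*lo < d)                        /* carry out of the low word */
                    *hi += 1;
    }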
285 kvm_clock_sync_scb(vcpu->arch.sie_block, *delta); in kvm_clock_sync()
287 kvm->arch.epoch = vcpu->arch.sie_block->epoch; in kvm_clock_sync()
288 kvm->arch.epdx = vcpu->arch.sie_block->epdx; in kvm_clock_sync()
290 if (vcpu->arch.cputm_enabled) in kvm_clock_sync()
291 vcpu->arch.cputm_start += *delta; in kvm_clock_sync()
292 if (vcpu->arch.vsie_block) in kvm_clock_sync()
293 kvm_clock_sync_scb(vcpu->arch.vsie_block, in kvm_clock_sync()
344 static __always_inline void __insn32_query(unsigned int opcode, u8 *query) in __insn32_query() argument
346 register unsigned long r0 asm("0") = 0; /* query function */ in __insn32_query()
347 register unsigned long r1 asm("1") = (unsigned long) query; in __insn32_query()
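Each of these instructions has a query form: function code 0 in r0 stores a bitmask of the supported function codes at the address in r1, and availability is then a bit test in that mask (the "test bit" mentioned in the comment further up). s390 numbers bits most-significant-first, so a check looks like this sketch (hypothetical helper, not from this file):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: test function code 'nr' in a 16-byte query result,
     * using the s390 MSB-first bit numbering. */
    static bool test_query_bit(const uint8_t mask[16], unsigned int nr)
    {
            return mask[nr >> 3] & (0x80u >> (nr & 7));
    }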
369 if (test_facility(28)) /* TOD-clock steering */ in kvm_s390_cpu_feat_init()
465 int rc = -ENOMEM; in kvm_arch_init()
467 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long)); in kvm_arch_init()
469 return -ENOMEM; in kvm_arch_init()
471 kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long)); in kvm_arch_init()
512 return -EINVAL; in kvm_arch_dev_ioctl()
597 struct gmap *gmap = kvm->arch.gmap; in kvm_arch_sync_dirty_log()
601 cur_gfn = memslot->base_gfn; in kvm_arch_sync_dirty_log()
602 last_gfn = memslot->base_gfn + memslot->npages; in kvm_arch_sync_dirty_log()
637 return -EINVAL; in kvm_vm_ioctl_get_dirty_log()
639 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
641 r = -EINVAL; in kvm_vm_ioctl_get_dirty_log()
642 if (log->slot >= KVM_USER_MEM_SLOTS) in kvm_vm_ioctl_get_dirty_log()
652 memset(memslot->dirty_bitmap, 0, n); in kvm_vm_ioctl_get_dirty_log()
656 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
674 if (cap->flags) in kvm_vm_ioctl_enable_cap()
675 return -EINVAL; in kvm_vm_ioctl_enable_cap()
677 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
680 kvm->arch.use_irqchip = 1; in kvm_vm_ioctl_enable_cap()
685 kvm->arch.user_sigp = 1; in kvm_vm_ioctl_enable_cap()
689 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
690 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
691 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
693 set_kvm_facility(kvm->arch.model.fac_mask, 129); in kvm_vm_ioctl_enable_cap()
694 set_kvm_facility(kvm->arch.model.fac_list, 129); in kvm_vm_ioctl_enable_cap()
696 set_kvm_facility(kvm->arch.model.fac_mask, 134); in kvm_vm_ioctl_enable_cap()
697 set_kvm_facility(kvm->arch.model.fac_list, 134); in kvm_vm_ioctl_enable_cap()
700 set_kvm_facility(kvm->arch.model.fac_mask, 135); in kvm_vm_ioctl_enable_cap()
701 set_kvm_facility(kvm->arch.model.fac_list, 135); in kvm_vm_ioctl_enable_cap()
704 set_kvm_facility(kvm->arch.model.fac_mask, 148); in kvm_vm_ioctl_enable_cap()
705 set_kvm_facility(kvm->arch.model.fac_list, 148); in kvm_vm_ioctl_enable_cap()
708 set_kvm_facility(kvm->arch.model.fac_mask, 152); in kvm_vm_ioctl_enable_cap()
709 set_kvm_facility(kvm->arch.model.fac_list, 152); in kvm_vm_ioctl_enable_cap()
713 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
714 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
719 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
720 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
721 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
722 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
724 set_kvm_facility(kvm->arch.model.fac_mask, 64); in kvm_vm_ioctl_enable_cap()
725 set_kvm_facility(kvm->arch.model.fac_list, 64); in kvm_vm_ioctl_enable_cap()
728 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
733 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
734 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
735 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
737 set_kvm_facility(kvm->arch.model.fac_mask, 72); in kvm_vm_ioctl_enable_cap()
738 set_kvm_facility(kvm->arch.model.fac_list, 72); in kvm_vm_ioctl_enable_cap()
741 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
746 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
747 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
748 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
749 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
751 set_kvm_facility(kvm->arch.model.fac_mask, 133); in kvm_vm_ioctl_enable_cap()
752 set_kvm_facility(kvm->arch.model.fac_list, 133); in kvm_vm_ioctl_enable_cap()
755 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
760 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
761 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
762 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
763 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_enable_cap()
764 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
767 mmap_write_lock(kvm->mm); in kvm_vm_ioctl_enable_cap()
768 kvm->mm->context.allow_gmap_hpage_1m = 1; in kvm_vm_ioctl_enable_cap()
769 mmap_write_unlock(kvm->mm); in kvm_vm_ioctl_enable_cap()
775 kvm->arch.use_skf = 0; in kvm_vm_ioctl_enable_cap()
776 kvm->arch.use_pfmfi = 0; in kvm_vm_ioctl_enable_cap()
778 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
784 kvm->arch.user_stsi = 1; in kvm_vm_ioctl_enable_cap()
789 kvm->arch.user_instr0 = 1; in kvm_vm_ioctl_enable_cap()
794 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
804 switch (attr->attr) { in kvm_s390_get_mem_control()
807 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes", in kvm_s390_get_mem_control()
808 kvm->arch.mem_limit); in kvm_s390_get_mem_control()
809 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr)) in kvm_s390_get_mem_control()
810 ret = -EFAULT; in kvm_s390_get_mem_control()
813 ret = -ENXIO; in kvm_s390_get_mem_control()
823 switch (attr->attr) { in kvm_s390_set_mem_control()
825 ret = -ENXIO; in kvm_s390_set_mem_control()
830 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
831 if (kvm->created_vcpus) in kvm_s390_set_mem_control()
832 ret = -EBUSY; in kvm_s390_set_mem_control()
833 else if (kvm->mm->context.allow_gmap_hpage_1m) in kvm_s390_set_mem_control()
834 ret = -EINVAL; in kvm_s390_set_mem_control()
836 kvm->arch.use_cmma = 1; in kvm_s390_set_mem_control()
838 kvm->arch.use_pfmfi = 0; in kvm_s390_set_mem_control()
841 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
844 ret = -ENXIO; in kvm_s390_set_mem_control()
847 ret = -EINVAL; in kvm_s390_set_mem_control()
848 if (!kvm->arch.use_cmma) in kvm_s390_set_mem_control()
852 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
853 idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_mem_control()
854 s390_reset_cmma(kvm->arch.gmap->mm); in kvm_s390_set_mem_control()
855 srcu_read_unlock(&kvm->srcu, idx); in kvm_s390_set_mem_control()
856 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
863 return -EINVAL; in kvm_s390_set_mem_control()
865 if (get_user(new_limit, (u64 __user *)attr->addr)) in kvm_s390_set_mem_control()
866 return -EFAULT; in kvm_s390_set_mem_control()
868 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT && in kvm_s390_set_mem_control()
869 new_limit > kvm->arch.mem_limit) in kvm_s390_set_mem_control()
870 return -E2BIG; in kvm_s390_set_mem_control()
873 return -EINVAL; in kvm_s390_set_mem_control()
877 new_limit -= 1; in kvm_s390_set_mem_control()
879 ret = -EBUSY; in kvm_s390_set_mem_control()
880 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
881 if (!kvm->created_vcpus) { in kvm_s390_set_mem_control()
883 struct gmap *new = gmap_create(current->mm, new_limit); in kvm_s390_set_mem_control()
886 ret = -ENOMEM; in kvm_s390_set_mem_control()
888 gmap_remove(kvm->arch.gmap); in kvm_s390_set_mem_control()
889 new->private = kvm; in kvm_s390_set_mem_control()
890 kvm->arch.gmap = new; in kvm_s390_set_mem_control()
894 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
897 (void *) kvm->arch.gmap->asce); in kvm_s390_set_mem_control()
901 ret = -ENXIO; in kvm_s390_set_mem_control()
927 mutex_lock(&kvm->lock); in kvm_s390_vm_set_crypto()
928 switch (attr->attr) { in kvm_s390_vm_set_crypto()
931 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
932 return -EINVAL; in kvm_s390_vm_set_crypto()
935 kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_vm_set_crypto()
936 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
937 kvm->arch.crypto.aes_kw = 1; in kvm_s390_vm_set_crypto()
942 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
943 return -EINVAL; in kvm_s390_vm_set_crypto()
946 kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_vm_set_crypto()
947 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
948 kvm->arch.crypto.dea_kw = 1; in kvm_s390_vm_set_crypto()
953 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
954 return -EINVAL; in kvm_s390_vm_set_crypto()
956 kvm->arch.crypto.aes_kw = 0; in kvm_s390_vm_set_crypto()
957 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
958 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
963 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
964 return -EINVAL; in kvm_s390_vm_set_crypto()
966 kvm->arch.crypto.dea_kw = 0; in kvm_s390_vm_set_crypto()
967 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
968 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
973 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
974 return -EOPNOTSUPP; in kvm_s390_vm_set_crypto()
976 kvm->arch.crypto.apie = 1; in kvm_s390_vm_set_crypto()
980 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
981 return -EOPNOTSUPP; in kvm_s390_vm_set_crypto()
983 kvm->arch.crypto.apie = 0; in kvm_s390_vm_set_crypto()
986 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
987 return -ENXIO; in kvm_s390_vm_set_crypto()
991 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1005 * Must be called with kvm->srcu held to avoid races on memslots, and with
1006 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
1016 if (kvm->arch.migration_mode) in kvm_s390_vm_start_migration()
1019 if (!slots || !slots->used_slots) in kvm_s390_vm_start_migration()
1020 return -EINVAL; in kvm_s390_vm_start_migration()
1022 if (!kvm->arch.use_cmma) { in kvm_s390_vm_start_migration()
1023 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1027 for (slotnr = 0; slotnr < slots->used_slots; slotnr++) { in kvm_s390_vm_start_migration()
1028 ms = slots->memslots + slotnr; in kvm_s390_vm_start_migration()
1029 if (!ms->dirty_bitmap) in kvm_s390_vm_start_migration()
1030 return -EINVAL; in kvm_s390_vm_start_migration()
1038 ram_pages += ms->npages; in kvm_s390_vm_start_migration()
1040 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages); in kvm_s390_vm_start_migration()
1041 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
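Per the locking comment above kvm_s390_vm_start_migration(), a caller nests roughly like this sketch (illustrative, not the actual call site):

    int idx, ret;

    idx = srcu_read_lock(&kvm->srcu);   /* stabilizes the memslot array */
    mutex_lock(&kvm->slots_lock);       /* serializes against stop_migration */
    ret = kvm_s390_vm_start_migration(kvm);
    mutex_unlock(&kvm->slots_lock);
    srcu_read_unlock(&kvm->srcu, idx);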
1047 * Must be called with kvm->slots_lock to avoid races with ourselves and
1053 if (!kvm->arch.migration_mode) in kvm_s390_vm_stop_migration()
1055 kvm->arch.migration_mode = 0; in kvm_s390_vm_stop_migration()
1056 if (kvm->arch.use_cmma) in kvm_s390_vm_stop_migration()
1064 int res = -ENXIO; in kvm_s390_vm_set_migration()
1066 mutex_lock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1067 switch (attr->attr) { in kvm_s390_vm_set_migration()
1077 mutex_unlock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1085 u64 mig = kvm->arch.migration_mode; in kvm_s390_vm_get_migration()
1087 if (attr->attr != KVM_S390_VM_MIGRATION_STATUS) in kvm_s390_vm_get_migration()
1088 return -ENXIO; in kvm_s390_vm_get_migration()
1090 if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig))) in kvm_s390_vm_get_migration()
1091 return -EFAULT; in kvm_s390_vm_get_migration()
1099 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) in kvm_s390_set_tod_ext()
1100 return -EFAULT; in kvm_s390_set_tod_ext()
1103 return -EINVAL; in kvm_s390_set_tod_ext()
1116 	if (copy_from_user(&gtod_high, (void __user *)attr->addr, in kvm_s390_set_tod_high()
1118 return -EFAULT; in kvm_s390_set_tod_high()
1121 return -EINVAL; in kvm_s390_set_tod_high()
1131 	if (copy_from_user(&gtod.tod, (void __user *)attr->addr, in kvm_s390_set_tod_low()
1133 return -EFAULT; in kvm_s390_set_tod_low()
1144 if (attr->flags) in kvm_s390_set_tod()
1145 return -EINVAL; in kvm_s390_set_tod()
1147 switch (attr->attr) { in kvm_s390_set_tod()
1158 ret = -ENXIO; in kvm_s390_set_tod()
1173 gtod->tod = htod.tod + kvm->arch.epoch; in kvm_s390_get_tod_clock()
1174 gtod->epoch_idx = 0; in kvm_s390_get_tod_clock()
1176 gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx; in kvm_s390_get_tod_clock()
1177 if (gtod->tod < htod.tod) in kvm_s390_get_tod_clock()
1178 gtod->epoch_idx += 1; in kvm_s390_get_tod_clock()
1190 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) in kvm_s390_get_tod_ext()
1191 return -EFAULT; in kvm_s390_get_tod_ext()
1193 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx", in kvm_s390_get_tod_ext()
1202 	if (copy_to_user((void __user *)attr->addr, &gtod_high, in kvm_s390_get_tod_high()
1204 return -EFAULT; in kvm_s390_get_tod_high()
1205 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high); in kvm_s390_get_tod_high()
1215 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) in kvm_s390_get_tod_low()
1216 return -EFAULT; in kvm_s390_get_tod_low()
1217 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod); in kvm_s390_get_tod_low()
1226 if (attr->flags) in kvm_s390_get_tod()
1227 return -EINVAL; in kvm_s390_get_tod()
1229 switch (attr->attr) { in kvm_s390_get_tod()
1240 ret = -ENXIO; in kvm_s390_get_tod()
1252 mutex_lock(&kvm->lock); in kvm_s390_set_processor()
1253 if (kvm->created_vcpus) { in kvm_s390_set_processor()
1254 ret = -EBUSY; in kvm_s390_set_processor()
1259 ret = -ENOMEM; in kvm_s390_set_processor()
1262 if (!copy_from_user(proc, (void __user *)attr->addr, in kvm_s390_set_processor()
1264 kvm->arch.model.cpuid = proc->cpuid; in kvm_s390_set_processor()
1267 if (lowest_ibc && proc->ibc) { in kvm_s390_set_processor()
1268 if (proc->ibc > unblocked_ibc) in kvm_s390_set_processor()
1269 kvm->arch.model.ibc = unblocked_ibc; in kvm_s390_set_processor()
1270 else if (proc->ibc < lowest_ibc) in kvm_s390_set_processor()
1271 kvm->arch.model.ibc = lowest_ibc; in kvm_s390_set_processor()
1273 kvm->arch.model.ibc = proc->ibc; in kvm_s390_set_processor()
1275 memcpy(kvm->arch.model.fac_list, proc->fac_list, in kvm_s390_set_processor()
1278 kvm->arch.model.ibc, in kvm_s390_set_processor()
1279 kvm->arch.model.cpuid); in kvm_s390_set_processor()
1281 kvm->arch.model.fac_list[0], in kvm_s390_set_processor()
1282 kvm->arch.model.fac_list[1], in kvm_s390_set_processor()
1283 kvm->arch.model.fac_list[2]); in kvm_s390_set_processor()
1285 ret = -EFAULT; in kvm_s390_set_processor()
1288 mutex_unlock(&kvm->lock); in kvm_s390_set_processor()
1297 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data))) in kvm_s390_set_processor_feat()
1298 return -EFAULT; in kvm_s390_set_processor_feat()
1302 return -EINVAL; in kvm_s390_set_processor_feat()
1304 mutex_lock(&kvm->lock); in kvm_s390_set_processor_feat()
1305 if (kvm->created_vcpus) { in kvm_s390_set_processor_feat()
1306 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1307 return -EBUSY; in kvm_s390_set_processor_feat()
1309 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat, in kvm_s390_set_processor_feat()
1311 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1322 mutex_lock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1323 if (kvm->created_vcpus) { in kvm_s390_set_processor_subfunc()
1324 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1325 return -EBUSY; in kvm_s390_set_processor_subfunc()
1328 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr, in kvm_s390_set_processor_subfunc()
1330 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1331 return -EFAULT; in kvm_s390_set_processor_subfunc()
1333 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1336 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_set_processor_subfunc()
1337 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_set_processor_subfunc()
1338 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_set_processor_subfunc()
1339 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_set_processor_subfunc()
1341 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_set_processor_subfunc()
1342 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_set_processor_subfunc()
1344 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_set_processor_subfunc()
1345 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_set_processor_subfunc()
1347 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_set_processor_subfunc()
1348 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_set_processor_subfunc()
1350 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_set_processor_subfunc()
1351 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_set_processor_subfunc()
1353 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_set_processor_subfunc()
1354 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_set_processor_subfunc()
1356 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_set_processor_subfunc()
1357 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_set_processor_subfunc()
1359 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_set_processor_subfunc()
1360 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_set_processor_subfunc()
1362 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_set_processor_subfunc()
1363 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_set_processor_subfunc()
1365 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_set_processor_subfunc()
1366 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_set_processor_subfunc()
1368 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_set_processor_subfunc()
1369 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_set_processor_subfunc()
1371 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_set_processor_subfunc()
1372 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_set_processor_subfunc()
1374 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_set_processor_subfunc()
1375 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_set_processor_subfunc()
1377 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_set_processor_subfunc()
1378 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_set_processor_subfunc()
1380 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_set_processor_subfunc()
1381 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_set_processor_subfunc()
1383 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_set_processor_subfunc()
1384 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_set_processor_subfunc()
1385 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_set_processor_subfunc()
1386 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_set_processor_subfunc()
1388 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_set_processor_subfunc()
1389 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_set_processor_subfunc()
1390 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_set_processor_subfunc()
1391 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_set_processor_subfunc()
1398 int ret = -ENXIO; in kvm_s390_set_cpu_model()
1400 switch (attr->attr) { in kvm_s390_set_cpu_model()
1421 ret = -ENOMEM; in kvm_s390_get_processor()
1424 proc->cpuid = kvm->arch.model.cpuid; in kvm_s390_get_processor()
1425 proc->ibc = kvm->arch.model.ibc; in kvm_s390_get_processor()
1426 memcpy(&proc->fac_list, kvm->arch.model.fac_list, in kvm_s390_get_processor()
1429 kvm->arch.model.ibc, in kvm_s390_get_processor()
1430 kvm->arch.model.cpuid); in kvm_s390_get_processor()
1432 kvm->arch.model.fac_list[0], in kvm_s390_get_processor()
1433 kvm->arch.model.fac_list[1], in kvm_s390_get_processor()
1434 kvm->arch.model.fac_list[2]); in kvm_s390_get_processor()
1435 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) in kvm_s390_get_processor()
1436 ret = -EFAULT; in kvm_s390_get_processor()
1449 ret = -ENOMEM; in kvm_s390_get_machine()
1452 get_cpu_id((struct cpuid *) &mach->cpuid); in kvm_s390_get_machine()
1453 mach->ibc = sclp.ibc; in kvm_s390_get_machine()
1454 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, in kvm_s390_get_machine()
1456 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, in kvm_s390_get_machine()
1459 kvm->arch.model.ibc, in kvm_s390_get_machine()
1460 kvm->arch.model.cpuid); in kvm_s390_get_machine()
1462 mach->fac_mask[0], in kvm_s390_get_machine()
1463 mach->fac_mask[1], in kvm_s390_get_machine()
1464 mach->fac_mask[2]); in kvm_s390_get_machine()
1466 mach->fac_list[0], in kvm_s390_get_machine()
1467 mach->fac_list[1], in kvm_s390_get_machine()
1468 mach->fac_list[2]); in kvm_s390_get_machine()
1469 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) in kvm_s390_get_machine()
1470 ret = -EFAULT; in kvm_s390_get_machine()
1481 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat, in kvm_s390_get_processor_feat()
1483 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) in kvm_s390_get_processor_feat()
1484 return -EFAULT; in kvm_s390_get_processor_feat()
1500 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) in kvm_s390_get_machine_feat()
1501 return -EFAULT; in kvm_s390_get_machine_feat()
1512 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs, in kvm_s390_get_processor_subfunc()
1514 return -EFAULT; in kvm_s390_get_processor_subfunc()
1517 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_get_processor_subfunc()
1518 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_get_processor_subfunc()
1519 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_get_processor_subfunc()
1520 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_get_processor_subfunc()
1522 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_get_processor_subfunc()
1523 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_get_processor_subfunc()
1525 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_get_processor_subfunc()
1526 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_get_processor_subfunc()
1528 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_get_processor_subfunc()
1529 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_get_processor_subfunc()
1531 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_get_processor_subfunc()
1532 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_get_processor_subfunc()
1534 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_get_processor_subfunc()
1535 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_get_processor_subfunc()
1537 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_get_processor_subfunc()
1538 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_get_processor_subfunc()
1540 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_get_processor_subfunc()
1541 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_get_processor_subfunc()
1543 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_get_processor_subfunc()
1544 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_get_processor_subfunc()
1546 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_get_processor_subfunc()
1547 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_get_processor_subfunc()
1549 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_get_processor_subfunc()
1550 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_get_processor_subfunc()
1552 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_get_processor_subfunc()
1553 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_get_processor_subfunc()
1555 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_get_processor_subfunc()
1556 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_get_processor_subfunc()
1558 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_get_processor_subfunc()
1559 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_get_processor_subfunc()
1561 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_get_processor_subfunc()
1562 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_get_processor_subfunc()
1564 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_get_processor_subfunc()
1565 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_get_processor_subfunc()
1566 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_get_processor_subfunc()
1567 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_get_processor_subfunc()
1569 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_get_processor_subfunc()
1570 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_get_processor_subfunc()
1571 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_get_processor_subfunc()
1572 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_get_processor_subfunc()
1580 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc, in kvm_s390_get_machine_subfunc()
1582 return -EFAULT; in kvm_s390_get_machine_subfunc()
1647 int ret = -ENXIO; in kvm_s390_get_cpu_model()
1649 switch (attr->attr) { in kvm_s390_get_cpu_model()
1676 switch (attr->group) { in kvm_s390_vm_set_attr()
1693 ret = -ENXIO; in kvm_s390_vm_set_attr()
1704 switch (attr->group) { in kvm_s390_vm_get_attr()
1718 ret = -ENXIO; in kvm_s390_vm_get_attr()
1729 switch (attr->group) { in kvm_s390_vm_has_attr()
1731 switch (attr->attr) { in kvm_s390_vm_has_attr()
1734 ret = sclp.has_cmma ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
1740 ret = -ENXIO; in kvm_s390_vm_has_attr()
1745 switch (attr->attr) { in kvm_s390_vm_has_attr()
1751 ret = -ENXIO; in kvm_s390_vm_has_attr()
1756 switch (attr->attr) { in kvm_s390_vm_has_attr()
1766 ret = -ENXIO; in kvm_s390_vm_has_attr()
1771 switch (attr->attr) { in kvm_s390_vm_has_attr()
1780 ret = ap_instructions_available() ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
1783 ret = -ENXIO; in kvm_s390_vm_has_attr()
1791 ret = -ENXIO; in kvm_s390_vm_has_attr()
1804 if (args->flags != 0) in kvm_s390_get_skeys()
1805 return -EINVAL; in kvm_s390_get_skeys()
1808 if (!mm_uses_skeys(current->mm)) in kvm_s390_get_skeys()
1812 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) in kvm_s390_get_skeys()
1813 return -EINVAL; in kvm_s390_get_skeys()
1815 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL); in kvm_s390_get_skeys()
1817 return -ENOMEM; in kvm_s390_get_skeys()
1819 mmap_read_lock(current->mm); in kvm_s390_get_skeys()
1820 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_skeys()
1821 for (i = 0; i < args->count; i++) { in kvm_s390_get_skeys()
1822 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_get_skeys()
1824 r = -EFAULT; in kvm_s390_get_skeys()
1828 r = get_guest_storage_key(current->mm, hva, &keys[i]); in kvm_s390_get_skeys()
1832 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_skeys()
1833 mmap_read_unlock(current->mm); in kvm_s390_get_skeys()
1836 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys, in kvm_s390_get_skeys()
1837 sizeof(uint8_t) * args->count); in kvm_s390_get_skeys()
1839 r = -EFAULT; in kvm_s390_get_skeys()
1853 if (args->flags != 0) in kvm_s390_set_skeys()
1854 return -EINVAL; in kvm_s390_set_skeys()
1857 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) in kvm_s390_set_skeys()
1858 return -EINVAL; in kvm_s390_set_skeys()
1860 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL); in kvm_s390_set_skeys()
1862 return -ENOMEM; in kvm_s390_set_skeys()
1864 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr, in kvm_s390_set_skeys()
1865 sizeof(uint8_t) * args->count); in kvm_s390_set_skeys()
1867 r = -EFAULT; in kvm_s390_set_skeys()
1877 mmap_read_lock(current->mm); in kvm_s390_set_skeys()
1878 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_skeys()
1879 while (i < args->count) { in kvm_s390_set_skeys()
1881 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_skeys()
1883 r = -EFAULT; in kvm_s390_set_skeys()
1889 r = -EINVAL; in kvm_s390_set_skeys()
1893 r = set_guest_storage_key(current->mm, hva, keys[i], 0); in kvm_s390_set_skeys()
1895 r = fixup_user_fault(current->mm, hva, in kvm_s390_set_skeys()
1903 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_skeys()
1904 mmap_read_unlock(current->mm); in kvm_s390_set_skeys()
1926 int start = 0, end = slots->used_slots; in gfn_to_memslot_approx()
1927 int slot = atomic_read(&slots->lru_slot); in gfn_to_memslot_approx()
1928 struct kvm_memory_slot *memslots = slots->memslots; in gfn_to_memslot_approx()
1935 slot = start + (end - start) / 2; in gfn_to_memslot_approx()
1943 if (start >= slots->used_slots) in gfn_to_memslot_approx()
1944 return slots->used_slots - 1; in gfn_to_memslot_approx()
1948 atomic_set(&slots->lru_slot, start); in gfn_to_memslot_approx()
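gfn_to_memslot_approx() is a binary search that, unlike the stock lookup, never reports "not found": memslots here are sorted by base_gfn in descending order, and a miss falls back to the nearest slot (with lru_slot caching the common repeat hit). The same search over a plain array, as a sketch (hypothetical types, assuming the descending sort):

    #include <stdint.h>

    struct slot { uint64_t base_gfn, npages; };

    /* Sketch: index of the slot containing gfn, or of the nearest one. */
    static int slot_search_approx(const struct slot *s, int n, uint64_t gfn)
    {
            int start = 0, end = n;

            while (start < end) {
                    int mid = start + (end - start) / 2;

                    if (gfn >= s[mid].base_gfn)
                            end = mid;        /* best match is at mid or before */
                    else
                            start = mid + 1;  /* gfn lies below this slot's base */
            }
            return start < n ? start : n - 1; /* clamp, as the code above does */
    }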
1957 unsigned long pgstev, hva, cur_gfn = args->start_gfn; in kvm_s390_peek_cmma()
1959 args->count = 0; in kvm_s390_peek_cmma()
1960 while (args->count < bufsize) { in kvm_s390_peek_cmma()
1967 return args->count ? 0 : -EFAULT; in kvm_s390_peek_cmma()
1968 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_peek_cmma()
1970 res[args->count++] = (pgstev >> 24) & 0x43; in kvm_s390_peek_cmma()
1981 struct kvm_memory_slot *ms = slots->memslots + slotidx; in kvm_s390_next_dirty_cmma()
1982 unsigned long ofs = cur_gfn - ms->base_gfn; in kvm_s390_next_dirty_cmma()
1984 if (ms->base_gfn + ms->npages <= cur_gfn) { in kvm_s390_next_dirty_cmma()
1985 slotidx--; in kvm_s390_next_dirty_cmma()
1988 slotidx = slots->used_slots - 1; in kvm_s390_next_dirty_cmma()
1990 ms = slots->memslots + slotidx; in kvm_s390_next_dirty_cmma()
1993 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs); in kvm_s390_next_dirty_cmma()
1994 while ((slotidx > 0) && (ofs >= ms->npages)) { in kvm_s390_next_dirty_cmma()
1995 slotidx--; in kvm_s390_next_dirty_cmma()
1996 ms = slots->memslots + slotidx; in kvm_s390_next_dirty_cmma()
1997 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0); in kvm_s390_next_dirty_cmma()
1999 return ms->base_gfn + ofs; in kvm_s390_next_dirty_cmma()
2009 if (unlikely(!slots->used_slots)) in kvm_s390_get_cmma()
2012 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn); in kvm_s390_get_cmma()
2014 args->count = 0; in kvm_s390_get_cmma()
2015 args->start_gfn = cur_gfn; in kvm_s390_get_cmma()
2019 mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages; in kvm_s390_get_cmma()
2021 while (args->count < bufsize) { in kvm_s390_get_cmma()
2026 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms))) in kvm_s390_get_cmma()
2027 atomic64_dec(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma()
2028 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_get_cmma()
2031 res[args->count++] = (pgstev >> 24) & 0x43; in kvm_s390_get_cmma()
2040 (next_gfn - args->start_gfn >= bufsize)) in kvm_s390_get_cmma()
2044 if (cur_gfn - ms->base_gfn >= ms->npages) { in kvm_s390_get_cmma()
2068 if (!kvm->arch.use_cmma) in kvm_s390_get_cmma_bits()
2069 return -ENXIO; in kvm_s390_get_cmma_bits()
2071 if (args->flags & ~KVM_S390_CMMA_PEEK) in kvm_s390_get_cmma_bits()
2072 return -EINVAL; in kvm_s390_get_cmma_bits()
2073 /* Migration mode query, and we are not doing a migration */ in kvm_s390_get_cmma_bits()
2074 peek = !!(args->flags & KVM_S390_CMMA_PEEK); in kvm_s390_get_cmma_bits()
2075 if (!peek && !kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2076 return -EINVAL; in kvm_s390_get_cmma_bits()
2078 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX); in kvm_s390_get_cmma_bits()
2079 if (!bufsize || !kvm->mm->context.uses_cmm) { in kvm_s390_get_cmma_bits()
2084 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) { in kvm_s390_get_cmma_bits()
2091 return -ENOMEM; in kvm_s390_get_cmma_bits()
2093 mmap_read_lock(kvm->mm); in kvm_s390_get_cmma_bits()
2094 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_cmma_bits()
2099 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_cmma_bits()
2100 mmap_read_unlock(kvm->mm); in kvm_s390_get_cmma_bits()
2102 if (kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2103 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma_bits()
2105 args->remaining = 0; in kvm_s390_get_cmma_bits()
2107 if (copy_to_user((void __user *)args->values, values, args->count)) in kvm_s390_get_cmma_bits()
2108 ret = -EFAULT; in kvm_s390_get_cmma_bits()
2117 * set and the mm->context.uses_cmm flag is set.
2126 mask = args->mask; in kvm_s390_set_cmma_bits()
2128 if (!kvm->arch.use_cmma) in kvm_s390_set_cmma_bits()
2129 return -ENXIO; in kvm_s390_set_cmma_bits()
2131 if (args->flags != 0) in kvm_s390_set_cmma_bits()
2132 return -EINVAL; in kvm_s390_set_cmma_bits()
2134 if (args->count > KVM_S390_CMMA_SIZE_MAX) in kvm_s390_set_cmma_bits()
2135 return -EINVAL; in kvm_s390_set_cmma_bits()
2137 if (args->count == 0) in kvm_s390_set_cmma_bits()
2140 bits = vmalloc(array_size(sizeof(*bits), args->count)); in kvm_s390_set_cmma_bits()
2142 return -ENOMEM; in kvm_s390_set_cmma_bits()
2144 r = copy_from_user(bits, (void __user *)args->values, args->count); in kvm_s390_set_cmma_bits()
2146 r = -EFAULT; in kvm_s390_set_cmma_bits()
2150 mmap_read_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2151 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_cmma_bits()
2152 for (i = 0; i < args->count; i++) { in kvm_s390_set_cmma_bits()
2153 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_cmma_bits()
2155 r = -EFAULT; in kvm_s390_set_cmma_bits()
2162 set_pgste_bits(kvm->mm, hva, mask, pgstev); in kvm_s390_set_cmma_bits()
2164 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_cmma_bits()
2165 mmap_read_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2167 if (!kvm->mm->context.uses_cmm) { in kvm_s390_set_cmma_bits()
2168 mmap_write_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2169 kvm->mm->context.uses_cmm = 1; in kvm_s390_set_cmma_bits()
2170 mmap_write_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2193 mutex_lock(&vcpu->mutex); in kvm_s390_cpus_from_pv()
2197 ret = -EIO; in kvm_s390_cpus_from_pv()
2199 mutex_unlock(&vcpu->mutex); in kvm_s390_cpus_from_pv()
2212 mutex_lock(&vcpu->mutex); in kvm_s390_cpus_to_pv()
2214 mutex_unlock(&vcpu->mutex); in kvm_s390_cpus_to_pv()
2227 void __user *argp = (void __user *)cmd->data; in kvm_s390_handle_pv()
2229 switch (cmd->cmd) { in kvm_s390_handle_pv()
2231 r = -EINVAL; in kvm_s390_handle_pv()
2243 mmap_write_lock(current->mm); in kvm_s390_handle_pv()
2245 mmap_write_unlock(current->mm); in kvm_s390_handle_pv()
2249 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2253 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2258 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2262 r = -EINVAL; in kvm_s390_handle_pv()
2266 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2274 r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2277 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2284 r = -EINVAL; in kvm_s390_handle_pv()
2288 r = -EFAULT; in kvm_s390_handle_pv()
2293 r = -EINVAL; in kvm_s390_handle_pv()
2297 r = -ENOMEM; in kvm_s390_handle_pv()
2302 r = -EFAULT; in kvm_s390_handle_pv()
2306 &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2314 r = -EINVAL; in kvm_s390_handle_pv()
2315 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm)) in kvm_s390_handle_pv()
2318 r = -EFAULT; in kvm_s390_handle_pv()
2323 &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2327 r = -EINVAL; in kvm_s390_handle_pv()
2332 UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2333 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc, in kvm_s390_handle_pv()
2334 cmd->rrc); in kvm_s390_handle_pv()
2338 r = -EINVAL; in kvm_s390_handle_pv()
2343 UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2345 cmd->rc, cmd->rrc); in kvm_s390_handle_pv()
2349 r = -EINVAL; in kvm_s390_handle_pv()
2354 UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2356 cmd->rc, cmd->rrc); in kvm_s390_handle_pv()
2360 r = -ENOTTY; in kvm_s390_handle_pv()
2368 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
2377 r = -EFAULT; in kvm_arch_vm_ioctl()
2386 r = -EINVAL; in kvm_arch_vm_ioctl()
2387 if (kvm->arch.use_irqchip) { in kvm_arch_vm_ioctl()
2395 r = -EFAULT; in kvm_arch_vm_ioctl()
2402 r = -EFAULT; in kvm_arch_vm_ioctl()
2409 r = -EFAULT; in kvm_arch_vm_ioctl()
2418 r = -EFAULT; in kvm_arch_vm_ioctl()
2428 r = -EFAULT; in kvm_arch_vm_ioctl()
2438 r = -EFAULT; in kvm_arch_vm_ioctl()
2441 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2443 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2447 r = -EFAULT; in kvm_arch_vm_ioctl()
2454 r = -EFAULT; in kvm_arch_vm_ioctl()
2457 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2459 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2466 kvm->arch.user_cpu_state_ctrl = 1; in kvm_arch_vm_ioctl()
2469 r = -EINVAL; in kvm_arch_vm_ioctl()
2473 r = -EFAULT; in kvm_arch_vm_ioctl()
2477 r = -EINVAL; in kvm_arch_vm_ioctl()
2480 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
2482 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
2484 r = -EFAULT; in kvm_arch_vm_ioctl()
2490 r = -ENOTTY; in kvm_arch_vm_ioctl()
2518 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; in kvm_s390_set_crycb_format()
2520 /* Clear the CRYCB format bits - i.e., set format 0 by default */ in kvm_s390_set_crycb_format()
2521 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK); in kvm_s390_set_crycb_format()
2528 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; in kvm_s390_set_crycb_format()
2530 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; in kvm_s390_set_crycb_format()
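crycbd packs the CRYCB address and its format into a single 32-bit word: the control block is sufficiently aligned that its low bits are free to carry the format. A sketch of the packing (illustrative; the real bit values are the CRYCB_FORMAT* constants, assumed here to occupy the low two bits):

    #include <stdint.h>

    #define FMT_MASK 0x3u  /* assumption: format lives in the low two bits */

    static uint32_t pack_crycbd(uint32_t crycb_addr, uint32_t fmt)
    {
            return (crycb_addr & ~FMT_MASK) | (fmt & FMT_MASK);
    }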
2536 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb; in kvm_arch_crypto_set_masks()
2538 mutex_lock(&kvm->lock); in kvm_arch_crypto_set_masks()
2541 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { in kvm_arch_crypto_set_masks()
2543 memcpy(crycb->apcb1.apm, apm, 32); in kvm_arch_crypto_set_masks()
2546 memcpy(crycb->apcb1.aqm, aqm, 32); in kvm_arch_crypto_set_masks()
2549 memcpy(crycb->apcb1.adm, adm, 32); in kvm_arch_crypto_set_masks()
2555 memcpy(crycb->apcb0.apm, apm, 8); in kvm_arch_crypto_set_masks()
2556 memcpy(crycb->apcb0.aqm, aqm, 2); in kvm_arch_crypto_set_masks()
2557 memcpy(crycb->apcb0.adm, adm, 2); in kvm_arch_crypto_set_masks()
2569 mutex_unlock(&kvm->lock); in kvm_arch_crypto_set_masks()
2575 mutex_lock(&kvm->lock); in kvm_arch_crypto_clear_masks()
2578 memset(&kvm->arch.crypto.crycb->apcb0, 0, in kvm_arch_crypto_clear_masks()
2579 sizeof(kvm->arch.crypto.crycb->apcb0)); in kvm_arch_crypto_clear_masks()
2580 memset(&kvm->arch.crypto.crycb->apcb1, 0, in kvm_arch_crypto_clear_masks()
2581 sizeof(kvm->arch.crypto.crycb->apcb1)); in kvm_arch_crypto_clear_masks()
2587 mutex_unlock(&kvm->lock); in kvm_arch_crypto_clear_masks()
2602 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; in kvm_s390_crypto_init()
2609 kvm->arch.crypto.aes_kw = 1; in kvm_s390_crypto_init()
2610 kvm->arch.crypto.dea_kw = 1; in kvm_s390_crypto_init()
2611 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_crypto_init()
2612 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_crypto_init()
2613 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_crypto_init()
2614 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_crypto_init()
2619 if (kvm->arch.use_esca) in sca_dispose()
2620 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block)); in sca_dispose()
2622 free_page((unsigned long)(kvm->arch.sca)); in sca_dispose()
2623 kvm->arch.sca = NULL; in sca_dispose()
2633 rc = -EINVAL; in kvm_arch_init_vm()
2648 rc = -ENOMEM; in kvm_arch_init_vm()
2652 rwlock_init(&kvm->arch.sca_lock); in kvm_arch_init_vm()
2654 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); in kvm_arch_init_vm()
2655 if (!kvm->arch.sca) in kvm_arch_init_vm()
2661 kvm->arch.sca = (struct bsca_block *) in kvm_arch_init_vm()
2662 ((char *) kvm->arch.sca + sca_offset); in kvm_arch_init_vm()
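The sca_offset adjustment above staggers successive basic SCAs within their page so SCAs of different VMs do not contend on the same cache lines. The rotation amounts to this sketch (illustrative; the real step and wrap bound come from the struct size and PAGE_SIZE):

    #include <stddef.h>

    /* Sketch: advance the offset by one step, wrapping so that the
     * block still fits inside a single page. */
    static unsigned int next_sca_offset(unsigned int offset,
                                        size_t block_size, size_t page_size)
    {
            offset += 16;                         /* assumed step */
            if (offset + block_size > page_size)
                    offset = 0;
            return offset;
    }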
2665 sprintf(debug_name, "kvm-%u", current->pid); in kvm_arch_init_vm()
2667 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); in kvm_arch_init_vm()
2668 if (!kvm->arch.dbf) in kvm_arch_init_vm()
2672 kvm->arch.sie_page2 = in kvm_arch_init_vm()
2674 if (!kvm->arch.sie_page2) in kvm_arch_init_vm()
2677 kvm->arch.sie_page2->kvm = kvm; in kvm_arch_init_vm()
2678 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; in kvm_arch_init_vm()
2681 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] & in kvm_arch_init_vm()
2684 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] & in kvm_arch_init_vm()
2687 kvm->arch.model.subfuncs = kvm_s390_available_subfunc; in kvm_arch_init_vm()
2689 /* we are always in czam mode - even on pre z14 machines */ in kvm_arch_init_vm()
2690 set_kvm_facility(kvm->arch.model.fac_mask, 138); in kvm_arch_init_vm()
2691 set_kvm_facility(kvm->arch.model.fac_list, 138); in kvm_arch_init_vm()
2693 set_kvm_facility(kvm->arch.model.fac_mask, 74); in kvm_arch_init_vm()
2694 set_kvm_facility(kvm->arch.model.fac_list, 74); in kvm_arch_init_vm()
2696 set_kvm_facility(kvm->arch.model.fac_mask, 147); in kvm_arch_init_vm()
2697 set_kvm_facility(kvm->arch.model.fac_list, 147); in kvm_arch_init_vm()
2701 set_kvm_facility(kvm->arch.model.fac_mask, 65); in kvm_arch_init_vm()
2703 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); in kvm_arch_init_vm()
2704 kvm->arch.model.ibc = sclp.ibc & 0x0fff; in kvm_arch_init_vm()
2708 mutex_init(&kvm->arch.float_int.ais_lock); in kvm_arch_init_vm()
2709 spin_lock_init(&kvm->arch.float_int.lock); in kvm_arch_init_vm()
2711 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); in kvm_arch_init_vm()
2712 init_waitqueue_head(&kvm->arch.ipte_wq); in kvm_arch_init_vm()
2713 mutex_init(&kvm->arch.ipte_mutex); in kvm_arch_init_vm()
2715 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); in kvm_arch_init_vm()
2719 kvm->arch.gmap = NULL; in kvm_arch_init_vm()
2720 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT; in kvm_arch_init_vm()
2723 kvm->arch.mem_limit = TASK_SIZE_MAX; in kvm_arch_init_vm()
2725 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX, in kvm_arch_init_vm()
2727 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1); in kvm_arch_init_vm()
2728 if (!kvm->arch.gmap) in kvm_arch_init_vm()
2730 kvm->arch.gmap->private = kvm; in kvm_arch_init_vm()
2731 kvm->arch.gmap->pfault_enabled = 0; in kvm_arch_init_vm()
2734 kvm->arch.use_pfmfi = sclp.has_pfmfi; in kvm_arch_init_vm()
2735 kvm->arch.use_skf = sclp.has_skey; in kvm_arch_init_vm()
2736 spin_lock_init(&kvm->arch.start_stop_lock); in kvm_arch_init_vm()
2740 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid); in kvm_arch_init_vm()
2744 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_init_vm()
2745 debug_unregister(kvm->arch.dbf); in kvm_arch_init_vm()
2756 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); in kvm_arch_vcpu_destroy()
2759 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
2762 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
2763 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_destroy()
2765 if (vcpu->kvm->arch.use_cmma) in kvm_arch_vcpu_destroy()
2770 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_destroy()
2781 mutex_lock(&kvm->lock); in kvm_free_vcpus()
2782 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) in kvm_free_vcpus()
2783 kvm->vcpus[i] = NULL; in kvm_free_vcpus()
2785 atomic_set(&kvm->online_vcpus, 0); in kvm_free_vcpus()
2786 mutex_unlock(&kvm->lock); in kvm_free_vcpus()
2797 * We are already at the end of life and kvm->lock is not taken. in kvm_arch_destroy_vm()
2804 debug_unregister(kvm->arch.dbf); in kvm_arch_destroy_vm()
2805 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_destroy_vm()
2807 gmap_remove(kvm->arch.gmap); in kvm_arch_destroy_vm()
2817 vcpu->arch.gmap = gmap_create(current->mm, -1UL); in __kvm_ucontrol_vcpu_init()
2818 if (!vcpu->arch.gmap) in __kvm_ucontrol_vcpu_init()
2819 return -ENOMEM; in __kvm_ucontrol_vcpu_init()
2820 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
2829 read_lock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
2830 if (vcpu->kvm->arch.use_esca) { in sca_del_vcpu()
2831 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
2833 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); in sca_del_vcpu()
2834 sca->cpu[vcpu->vcpu_id].sda = 0; in sca_del_vcpu()
2836 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
2838 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); in sca_del_vcpu()
2839 sca->cpu[vcpu->vcpu_id].sda = 0; in sca_del_vcpu()
2841 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
2847 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2850 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); in sca_add_vcpu()
2851 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; in sca_add_vcpu()
2854 read_lock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
2855 if (vcpu->kvm->arch.use_esca) { in sca_add_vcpu()
2856 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2858 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block; in sca_add_vcpu()
2859 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); in sca_add_vcpu()
2860 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU; in sca_add_vcpu()
2861 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_add_vcpu()
2862 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); in sca_add_vcpu()
2864 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2866 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block; in sca_add_vcpu()
2867 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); in sca_add_vcpu()
2868 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; in sca_add_vcpu()
2869 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); in sca_add_vcpu()
2871 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
2877 d->sda = s->sda; in sca_copy_entry()
2878 d->sigp_ctrl.c = s->sigp_ctrl.c; in sca_copy_entry()
2879 d->sigp_ctrl.scn = s->sigp_ctrl.scn; in sca_copy_entry()
2886 d->ipte_control = s->ipte_control; in sca_copy_b_to_e()
2887 d->mcn[0] = s->mcn; in sca_copy_b_to_e()
2889 sca_copy_entry(&d->cpu[i], &s->cpu[i]); in sca_copy_b_to_e()
2894 struct bsca_block *old_sca = kvm->arch.sca; in sca_switch_to_extended()
2900 if (kvm->arch.use_esca) in sca_switch_to_extended()
2905 return -ENOMEM; in sca_switch_to_extended()
2911 write_lock(&kvm->arch.sca_lock); in sca_switch_to_extended()
2916 vcpu->arch.sie_block->scaoh = scaoh; in sca_switch_to_extended()
2917 vcpu->arch.sie_block->scaol = scaol; in sca_switch_to_extended()
2918 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_switch_to_extended()
2920 kvm->arch.sca = new_sca; in sca_switch_to_extended()
2921 kvm->arch.use_esca = 1; in sca_switch_to_extended()
2923 write_unlock(&kvm->arch.sca_lock); in sca_switch_to_extended()
2928 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)", in sca_switch_to_extended()
2929 old_sca, kvm->arch.sca); in sca_switch_to_extended()
2947 mutex_lock(&kvm->lock); in sca_can_add_vcpu()
2948 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm); in sca_can_add_vcpu()
2949 mutex_unlock(&kvm->lock); in sca_can_add_vcpu()
2957 WARN_ON_ONCE(vcpu->arch.cputm_start != 0); in __start_cpu_timer_accounting()
2958 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
2959 vcpu->arch.cputm_start = get_tod_clock_fast(); in __start_cpu_timer_accounting()
2960 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
2966 WARN_ON_ONCE(vcpu->arch.cputm_start == 0); in __stop_cpu_timer_accounting()
2967 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
2968 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start; in __stop_cpu_timer_accounting()
2969 vcpu->arch.cputm_start = 0; in __stop_cpu_timer_accounting()
2970 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
2976 WARN_ON_ONCE(vcpu->arch.cputm_enabled); in __enable_cpu_timer_accounting()
2977 vcpu->arch.cputm_enabled = true; in __enable_cpu_timer_accounting()
2984 WARN_ON_ONCE(!vcpu->arch.cputm_enabled); in __disable_cpu_timer_accounting()
2986 vcpu->arch.cputm_enabled = false; in __disable_cpu_timer_accounting()
3003 /* set the cpu timer - may only be called from the VCPU thread itself */
3007 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
3008 if (vcpu->arch.cputm_enabled) in kvm_s390_set_cpu_timer()
3009 vcpu->arch.cputm_start = get_tod_clock_fast(); in kvm_s390_set_cpu_timer()
3010 vcpu->arch.sie_block->cputm = cputm; in kvm_s390_set_cpu_timer()
3011 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
3015 /* update and get the cpu timer - can also be called from other VCPU threads */
3021 if (unlikely(!vcpu->arch.cputm_enabled)) in kvm_s390_get_cpu_timer()
3022 return vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
3026 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount); in kvm_s390_get_cpu_timer()
3031 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu); in kvm_s390_get_cpu_timer()
3032 value = vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
3034 if (likely(vcpu->arch.cputm_start)) in kvm_s390_get_cpu_timer()
3035 value -= get_tod_clock_fast() - vcpu->arch.cputm_start; in kvm_s390_get_cpu_timer()
3036 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1)); in kvm_s390_get_cpu_timer()
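On the retry test just above: raw_read_seqcount() hands back the counter without waiting for a writer to finish, so it may be odd while the VCPU thread is mid-update; masking the low bit ("seq & ~1") makes such a read compare stale and forces a retry. The reader side of the pattern as a stand-alone C11 sketch (hypothetical names):

    #include <stdatomic.h>
    #include <stdint.h>

    struct timer_state {
            atomic_uint seq;         /* even: stable, odd: write in progress */
            _Atomic uint64_t cputm;
    };

    static uint64_t timer_read(struct timer_state *t)
    {
            unsigned int s1, s2;
            uint64_t value;

            do {
                    s1 = atomic_load_explicit(&t->seq, memory_order_acquire);
                    value = atomic_load_explicit(&t->cputm, memory_order_relaxed);
                    atomic_thread_fence(memory_order_acquire);
                    s2 = atomic_load_explicit(&t->seq, memory_order_relaxed);
            } while (s2 != (s1 & ~1u)); /* "& ~1" folds the odd case into the mismatch */

            return value;
    }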
3044 gmap_enable(vcpu->arch.enabled_gmap); in kvm_arch_vcpu_load()
3046 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_load()
3048 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
3053 vcpu->cpu = -1; in kvm_arch_vcpu_put()
3054 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_put()
3057 vcpu->arch.enabled_gmap = gmap_get_enabled(); in kvm_arch_vcpu_put()
3058 gmap_disable(vcpu->arch.enabled_gmap); in kvm_arch_vcpu_put()
3064 mutex_lock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3066 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
3067 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; in kvm_arch_vcpu_postcreate()
3069 mutex_unlock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3070 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_postcreate()
3071 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
3074 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0) in kvm_arch_vcpu_postcreate()
3075 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_arch_vcpu_postcreate()
3077 vcpu->arch.enabled_gmap = vcpu->arch.gmap; in kvm_arch_vcpu_postcreate()
3082 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) && in kvm_has_pckmo_subfunc()
3105 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76)) in kvm_s390_vcpu_crypto_setup()
3108 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
3109 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); in kvm_s390_vcpu_crypto_setup()
3110 vcpu->arch.sie_block->eca &= ~ECA_APIE; in kvm_s390_vcpu_crypto_setup()
3111 vcpu->arch.sie_block->ecd &= ~ECD_ECC; in kvm_s390_vcpu_crypto_setup()
3113 if (vcpu->kvm->arch.crypto.apie) in kvm_s390_vcpu_crypto_setup()
3114 vcpu->arch.sie_block->eca |= ECA_APIE; in kvm_s390_vcpu_crypto_setup()
3117 if (vcpu->kvm->arch.crypto.aes_kw) { in kvm_s390_vcpu_crypto_setup()
3118 vcpu->arch.sie_block->ecb3 |= ECB3_AES; in kvm_s390_vcpu_crypto_setup()
3120 if (kvm_has_pckmo_ecc(vcpu->kvm)) in kvm_s390_vcpu_crypto_setup()
3121 vcpu->arch.sie_block->ecd |= ECD_ECC; in kvm_s390_vcpu_crypto_setup()
3124 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
3125 vcpu->arch.sie_block->ecb3 |= ECB3_DEA; in kvm_s390_vcpu_crypto_setup()
3130 free_page(vcpu->arch.sie_block->cbrlo); in kvm_s390_vcpu_unsetup_cmma()
3131 vcpu->arch.sie_block->cbrlo = 0; in kvm_s390_vcpu_unsetup_cmma()
3136 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL); in kvm_s390_vcpu_setup_cmma()
3137 if (!vcpu->arch.sie_block->cbrlo) in kvm_s390_vcpu_setup_cmma()
3138 return -ENOMEM; in kvm_s390_vcpu_setup_cmma()
3144 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
3146 vcpu->arch.sie_block->ibc = model->ibc; in kvm_s390_vcpu_setup_model()
3147 if (test_kvm_facility(vcpu->kvm, 7)) in kvm_s390_vcpu_setup_model()
3148 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list; in kvm_s390_vcpu_setup_model()
3156 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | in kvm_s390_vcpu_setup()
3160 if (test_kvm_facility(vcpu->kvm, 78)) in kvm_s390_vcpu_setup()
3162 else if (test_kvm_facility(vcpu->kvm, 8)) in kvm_s390_vcpu_setup()
3169 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT; in kvm_s390_vcpu_setup()
3170 if (test_kvm_facility(vcpu->kvm, 9)) in kvm_s390_vcpu_setup()
3171 vcpu->arch.sie_block->ecb |= ECB_SRSI; in kvm_s390_vcpu_setup()
3172 if (test_kvm_facility(vcpu->kvm, 73)) in kvm_s390_vcpu_setup()
3173 vcpu->arch.sie_block->ecb |= ECB_TE; in kvm_s390_vcpu_setup()
3175 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi) in kvm_s390_vcpu_setup()
3176 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI; in kvm_s390_vcpu_setup()
3177 if (test_kvm_facility(vcpu->kvm, 130)) in kvm_s390_vcpu_setup()
3178 vcpu->arch.sie_block->ecb2 |= ECB2_IEP; in kvm_s390_vcpu_setup()
3179 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI; in kvm_s390_vcpu_setup()
3181 vcpu->arch.sie_block->eca |= ECA_CEI; in kvm_s390_vcpu_setup()
3183 vcpu->arch.sie_block->eca |= ECA_IB; in kvm_s390_vcpu_setup()
3185 vcpu->arch.sie_block->eca |= ECA_SII; in kvm_s390_vcpu_setup()
3187 vcpu->arch.sie_block->eca |= ECA_SIGPI; in kvm_s390_vcpu_setup()
3188 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_s390_vcpu_setup()
3189 vcpu->arch.sie_block->eca |= ECA_VX; in kvm_s390_vcpu_setup()
3190 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in kvm_s390_vcpu_setup()
3192 if (test_kvm_facility(vcpu->kvm, 139)) in kvm_s390_vcpu_setup()
3193 vcpu->arch.sie_block->ecd |= ECD_MEF; in kvm_s390_vcpu_setup()
3194 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_s390_vcpu_setup()
3195 vcpu->arch.sie_block->ecd |= ECD_ETOKENF; in kvm_s390_vcpu_setup()
3196 if (vcpu->arch.sie_block->gd) { in kvm_s390_vcpu_setup()
3197 vcpu->arch.sie_block->eca |= ECA_AIV; in kvm_s390_vcpu_setup()
3198 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u", in kvm_s390_vcpu_setup()
3199 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id); in kvm_s390_vcpu_setup()
3201 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx) in kvm_s390_vcpu_setup()
3203 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb; in kvm_s390_vcpu_setup()
3208 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; in kvm_s390_vcpu_setup()
3210 if (vcpu->kvm->arch.use_cmma) { in kvm_s390_vcpu_setup()
3215 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in kvm_s390_vcpu_setup()
3216 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; in kvm_s390_vcpu_setup()
3218 vcpu->arch.sie_block->hpid = HPID_KVM; in kvm_s390_vcpu_setup()
3222 mutex_lock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3223 if (kvm_s390_pv_is_protected(vcpu->kvm)) { in kvm_s390_vcpu_setup()
3228 mutex_unlock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
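/*
 * Illustrative sketch (editorial, hypothetical helper): much of the setup
 * above follows one pattern - "if the CPU model has facility F, OR control
 * bit B into the SIE block" - which could equally be table-driven. The
 * fac/bit pairs below are taken from the fragment (9 -> ECB_SRSI,
 * 73 -> ECB_TE); setup_ecb() itself does not exist in the kernel.
 */
static const struct {
	int fac;		/* facility number for test_kvm_facility() */
	unsigned char bit;	/* bit to set in sie_block->ecb */
} ecb_map[] = {
	{  9, ECB_SRSI },
	{ 73, ECB_TE },
};

static void setup_ecb(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ecb_map); i++)
		if (test_kvm_facility(vcpu->kvm, ecb_map[i].fac))
			vcpu->arch.sie_block->ecb |= ecb_map[i].bit;
}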
3236 return -EINVAL; in kvm_arch_vcpu_precreate()
3248 return -ENOMEM; in kvm_arch_vcpu_create()
3250 vcpu->arch.sie_block = &sie_page->sie_block; in kvm_arch_vcpu_create()
3251 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; in kvm_arch_vcpu_create()
3254 vcpu->arch.sie_block->mso = 0; in kvm_arch_vcpu_create()
3255 vcpu->arch.sie_block->msl = sclp.hamax; in kvm_arch_vcpu_create()
3257 vcpu->arch.sie_block->icpua = vcpu->vcpu_id; in kvm_arch_vcpu_create()
3258 spin_lock_init(&vcpu->arch.local_int.lock); in kvm_arch_vcpu_create()
3259 vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin; in kvm_arch_vcpu_create()
3260 if (vcpu->arch.sie_block->gd && sclp.has_gisaf) in kvm_arch_vcpu_create()
3261 vcpu->arch.sie_block->gd |= GISA_FORMAT1; in kvm_arch_vcpu_create()
3262 seqcount_init(&vcpu->arch.cputm_seqcount); in kvm_arch_vcpu_create()
3264 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_create()
3266 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | in kvm_arch_vcpu_create()
3274 if (test_kvm_facility(vcpu->kvm, 64)) in kvm_arch_vcpu_create()
3275 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; in kvm_arch_vcpu_create()
3276 if (test_kvm_facility(vcpu->kvm, 82)) in kvm_arch_vcpu_create()
3277 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC; in kvm_arch_vcpu_create()
3278 if (test_kvm_facility(vcpu->kvm, 133)) in kvm_arch_vcpu_create()
3279 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB; in kvm_arch_vcpu_create()
3280 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_arch_vcpu_create()
3281 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN; in kvm_arch_vcpu_create()
3286 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; in kvm_arch_vcpu_create()
3288 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS; in kvm_arch_vcpu_create()
3290 if (kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_create()
3296 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", in kvm_arch_vcpu_create()
3297 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
3298 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
3306 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_create()
3307 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_create()
3309 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_create()
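/*
 * Editor's note (hedged): "gd" above is the GISA designation - the 32-bit
 * origin of the guest interruption state area, with GISA_FORMAT1 OR-ed
 * into the low bits when the SCLP reports format-1 support
 * (sclp.has_gisaf). The same field later gates ECA_AIV in
 * kvm_s390_vcpu_setup(), where its low two bits are logged as
 * "gisa format-%u".
 */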
3320 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE); in kvm_arch_vcpu_in_kernel()
3325 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_block()
3331 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_unblock()
3336 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request()
3342 return atomic_read(&vcpu->arch.sie_block->prog20) & in kvm_s390_vcpu_sie_inhibited()
3348 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request_handled()
3359 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) in exit_sie()
3373 struct kvm *kvm = gmap->private; in kvm_gmap_notifier()
3386 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) { in kvm_gmap_notifier()
3387 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx", in kvm_gmap_notifier()
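/*
 * Editor's note: the range test above checks whether the invalidated gmap
 * range overlaps the two 4 KiB guest prefix pages; if so, the vcpu is
 * asked via a sync request to revalidate its prefix mapping before
 * re-entering SIE.
 */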
3399 vcpu->stat.halt_no_poll_steal++; in kvm_arch_no_poll()
3415 int r = -EINVAL; in kvm_arch_vcpu_ioctl_get_one_reg()
3417 switch (reg->id) { in kvm_arch_vcpu_ioctl_get_one_reg()
3419 r = put_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_get_one_reg()
3420 (u32 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3423 r = put_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_get_one_reg()
3424 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3428 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3431 r = put_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_get_one_reg()
3432 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3435 r = put_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_get_one_reg()
3436 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3439 r = put_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_get_one_reg()
3440 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3443 r = put_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_get_one_reg()
3444 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3447 r = put_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_get_one_reg()
3448 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3451 r = put_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_get_one_reg()
3452 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3464 int r = -EINVAL; in kvm_arch_vcpu_ioctl_set_one_reg()
3467 switch (reg->id) { in kvm_arch_vcpu_ioctl_set_one_reg()
3469 r = get_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_set_one_reg()
3470 (u32 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3473 r = get_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_set_one_reg()
3474 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3477 r = get_user(val, (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3482 r = get_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_set_one_reg()
3483 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3486 r = get_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_set_one_reg()
3487 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3488 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_vcpu_ioctl_set_one_reg()
3492 r = get_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_set_one_reg()
3493 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3496 r = get_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_set_one_reg()
3497 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3500 r = get_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_set_one_reg()
3501 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3504 r = get_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_set_one_reg()
3505 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
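/*
 * Illustrative userspace sketch (editorial, not part of this file): the
 * accessors above sit behind KVM_GET_ONE_REG/KVM_SET_ONE_REG on the vcpu
 * fd. Assumes the s390 register ids from <linux/kvm.h>; error handling
 * elided.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int get_clock_comparator(int vcpu_fd, __u64 *ckc)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_CLOCK_COMP,	/* maps to sie_block->ckc */
		.addr = (__u64)(unsigned long)ckc,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}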
3516 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI; in kvm_arch_vcpu_ioctl_normal_reset()
3517 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_ioctl_normal_reset()
3518 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb)); in kvm_arch_vcpu_ioctl_normal_reset()
3521 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) in kvm_arch_vcpu_ioctl_normal_reset()
3535 vcpu->arch.sie_block->gpsw.mask = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3536 vcpu->arch.sie_block->gpsw.addr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3539 vcpu->arch.sie_block->ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3540 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr)); in kvm_arch_vcpu_ioctl_initial_reset()
3541 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
3542 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
3545 memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs)); in kvm_arch_vcpu_ioctl_initial_reset()
3546 vcpu->run->s.regs.ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3547 vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
3548 vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
3549 vcpu->run->psw_addr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3550 vcpu->run->psw_mask = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3551 vcpu->run->s.regs.todpr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3552 vcpu->run->s.regs.cputm = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3553 vcpu->run->s.regs.ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3554 vcpu->run->s.regs.pp = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3555 vcpu->run->s.regs.gbea = 1; in kvm_arch_vcpu_ioctl_initial_reset()
3556 vcpu->run->s.regs.fpc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3563 vcpu->arch.sie_block->gbea = 1; in kvm_arch_vcpu_ioctl_initial_reset()
3564 vcpu->arch.sie_block->pp = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3565 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in kvm_arch_vcpu_ioctl_initial_reset()
3566 vcpu->arch.sie_block->todpr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3572 struct kvm_sync_regs *regs = &vcpu->run->s.regs; in kvm_arch_vcpu_ioctl_clear_reset()
3577 memset(&regs->gprs, 0, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_clear_reset()
3578 memset(&regs->vrs, 0, sizeof(regs->vrs)); in kvm_arch_vcpu_ioctl_clear_reset()
3579 memset(&regs->acrs, 0, sizeof(regs->acrs)); in kvm_arch_vcpu_ioctl_clear_reset()
3580 memset(&regs->gscb, 0, sizeof(regs->gscb)); in kvm_arch_vcpu_ioctl_clear_reset()
3582 regs->etoken = 0; in kvm_arch_vcpu_ioctl_clear_reset()
3583 regs->etoken_extension = 0; in kvm_arch_vcpu_ioctl_clear_reset()
3589 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_set_regs()
3597 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_get_regs()
3607 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_set_sregs()
3608 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_set_sregs()
3619 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_get_sregs()
3620 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_get_sregs()
3632 if (test_fp_ctl(fpu->fpc)) { in kvm_arch_vcpu_ioctl_set_fpu()
3633 ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_fpu()
3636 vcpu->run->s.regs.fpc = fpu->fpc; in kvm_arch_vcpu_ioctl_set_fpu()
3638 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs, in kvm_arch_vcpu_ioctl_set_fpu()
3639 (freg_t *) fpu->fprs); in kvm_arch_vcpu_ioctl_set_fpu()
3641 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs)); in kvm_arch_vcpu_ioctl_set_fpu()
3655 convert_vx_to_fp((freg_t *) fpu->fprs, in kvm_arch_vcpu_ioctl_get_fpu()
3656 (__vector128 *) vcpu->run->s.regs.vrs); in kvm_arch_vcpu_ioctl_get_fpu()
3658 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs)); in kvm_arch_vcpu_ioctl_get_fpu()
3659 fpu->fpc = vcpu->run->s.regs.fpc; in kvm_arch_vcpu_ioctl_get_fpu()
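/*
 * Editor's note: with the vector facility, FPR n architecturally overlays
 * bits 0-63 of VR n, so convert_fp_to_vx()/convert_vx_to_fp() only copy
 * the leftmost doubleword of each of the first 16 vector registers;
 * without the facility, the fprs array in the run struct is the
 * authoritative copy, hence the memcpy branches above.
 */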
3670 rc = -EBUSY; in kvm_arch_vcpu_ioctl_set_initial_psw()
3672 vcpu->run->psw_mask = psw.mask; in kvm_arch_vcpu_ioctl_set_initial_psw()
3673 vcpu->run->psw_addr = psw.addr; in kvm_arch_vcpu_ioctl_set_initial_psw()
3681 return -EINVAL; /* not implemented yet */ in kvm_arch_vcpu_ioctl_translate()
3695 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
3698 if (dbg->control & ~VALID_GUESTDBG_FLAGS) { in kvm_arch_vcpu_ioctl_set_guest_debug()
3699 rc = -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
3703 rc = -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
3707 if (dbg->control & KVM_GUESTDBG_ENABLE) { in kvm_arch_vcpu_ioctl_set_guest_debug()
3708 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
3712 if (dbg->control & KVM_GUESTDBG_USE_HW_BP) in kvm_arch_vcpu_ioctl_set_guest_debug()
3716 vcpu->arch.guestdbg.last_bp = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
3720 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
3752 /* user space knows about this interface - let it control the state */ in kvm_arch_vcpu_ioctl_set_mpstate()
3753 vcpu->kvm->arch.user_cpu_state_ctrl = 1; in kvm_arch_vcpu_ioctl_set_mpstate()
3755 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
3764 rc = -ENXIO; in kvm_arch_vcpu_ioctl_set_mpstate()
3772 rc = -ENXIO; in kvm_arch_vcpu_ioctl_set_mpstate()
3791 * We use MMU_RELOAD just to re-arm the ipte notifier for the in kvm_s390_handle_requests()
3799 rc = gmap_mprotect_notify(vcpu->arch.gmap, in kvm_s390_handle_requests()
3810 vcpu->arch.sie_block->ihcpu = 0xffff; in kvm_s390_handle_requests()
3816 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); in kvm_s390_handle_requests()
3824 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); in kvm_s390_handle_requests()
3831 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_s390_handle_requests()
3841 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA; in kvm_s390_handle_requests()
3847 * Re-enable CMM virtualization if CMMA is available and in kvm_s390_handle_requests()
3850 if ((vcpu->kvm->arch.use_cmma) && in kvm_s390_handle_requests()
3851 (vcpu->kvm->mm->context.uses_cmm)) in kvm_s390_handle_requests()
3852 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; in kvm_s390_handle_requests()
3871 mutex_lock(&kvm->lock); in kvm_s390_set_tod_clock()
3876 kvm->arch.epoch = gtod->tod - htod.tod; in kvm_s390_set_tod_clock()
3877 kvm->arch.epdx = 0; in kvm_s390_set_tod_clock()
3879 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx; in kvm_s390_set_tod_clock()
3880 if (kvm->arch.epoch > gtod->tod) in kvm_s390_set_tod_clock()
3881 kvm->arch.epdx -= 1; in kvm_s390_set_tod_clock()
3886 vcpu->arch.sie_block->epoch = kvm->arch.epoch; in kvm_s390_set_tod_clock()
3887 vcpu->arch.sie_block->epdx = kvm->arch.epdx; in kvm_s390_set_tod_clock()
3892 mutex_unlock(&kvm->lock); in kvm_s390_set_tod_clock()
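/*
 * Worked sketch (editorial, hypothetical helper): guest TOD = host TOD +
 * epoch, extended by the multiple-epoch facility to an (epoch index : TOD)
 * pair. The subtraction above is a two-word subtract with borrow; the same
 * arithmetic, self-contained:
 */
static void tod_epoch_diff(u64 gtod, u64 gtod_idx, u64 htod, u64 htod_idx,
			   u64 *epoch, u64 *epdx)
{
	*epoch = gtod - htod;
	*epdx = gtod_idx - htod_idx;
	if (*epoch > gtod)	/* low word wrapped past zero: borrow */
		*epdx -= 1;
}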
3896 * kvm_arch_fault_in_page - fault-in guest page if necessary
3901 * Make sure that a guest page has been faulted-in on the host.
3907 return gmap_fault(vcpu->arch.gmap, gpa, in kvm_arch_fault_in_page()
3924 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); in __kvm_inject_pfault_token()
3931 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); in kvm_arch_async_page_not_present()
3932 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); in kvm_arch_async_page_not_present()
3940 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); in kvm_arch_async_page_present()
3941 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); in kvm_arch_async_page_present()
3964 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_setup_async_pf()
3966 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != in kvm_arch_setup_async_pf()
3967 vcpu->arch.pfault_compare) in kvm_arch_setup_async_pf()
3973 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) in kvm_arch_setup_async_pf()
3975 if (!vcpu->arch.gmap->pfault_enabled) in kvm_arch_setup_async_pf()
3978 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); in kvm_arch_setup_async_pf()
3979 hva += current->thread.gmap_addr & ~PAGE_MASK; in kvm_arch_setup_async_pf()
3980 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) in kvm_arch_setup_async_pf()
3983 return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); in kvm_arch_setup_async_pf()
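/*
 * Editor's note (hedged): async page faults are armed only when the guest
 * previously registered a pfault token (DIAG 0x258, handled elsewhere),
 * the current PSW matches the guest's pfault_select/pfault_compare pair,
 * and CR0 has the service-signal submask open; otherwise KVM falls back
 * to the synchronous fault-in path.
 */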
3997 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14]; in vcpu_pre_run()
3998 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15]; in vcpu_pre_run()
4003 if (!kvm_is_ucontrol(vcpu->kvm)) { in vcpu_pre_run()
4018 clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask); in vcpu_pre_run()
4020 vcpu->arch.sie_block->icptcode = 0; in vcpu_pre_run()
4021 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); in vcpu_pre_run()
4047 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1); in vcpu_post_run_fault_in_sie()
4052 /* Instruction-Fetching Exceptions - we can't detect the ilen. in vcpu_post_run_fault_in_sie()
4056 pgm_info = vcpu->arch.pgm; in vcpu_post_run_fault_in_sie()
4070 vcpu->arch.sie_block->icptcode); in vcpu_post_run()
4071 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); in vcpu_post_run()
4076 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14; in vcpu_post_run()
4077 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15; in vcpu_post_run()
4079 if (exit_reason == -EINTR) { in vcpu_post_run()
4081 sie_page = container_of(vcpu->arch.sie_block, in vcpu_post_run()
4083 mcck_info = &sie_page->mcck_info; in vcpu_post_run()
4088 if (vcpu->arch.sie_block->icptcode > 0) { in vcpu_post_run()
4091 if (rc != -EOPNOTSUPP) in vcpu_post_run()
4093 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC; in vcpu_post_run()
4094 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; in vcpu_post_run()
4095 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; in vcpu_post_run()
4096 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; in vcpu_post_run()
4097 return -EREMOTE; in vcpu_post_run()
4098 } else if (exit_reason != -EFAULT) { in vcpu_post_run()
4099 vcpu->stat.exit_null++; in vcpu_post_run()
4101 } else if (kvm_is_ucontrol(vcpu->kvm)) { in vcpu_post_run()
4102 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; in vcpu_post_run()
4103 vcpu->run->s390_ucontrol.trans_exc_code = in vcpu_post_run()
4104 current->thread.gmap_addr; in vcpu_post_run()
4105 vcpu->run->s390_ucontrol.pgm_code = 0x10; in vcpu_post_run()
4106 return -EREMOTE; in vcpu_post_run()
4107 } else if (current->thread.gmap_pfault) { in vcpu_post_run()
4109 current->thread.gmap_pfault = 0; in vcpu_post_run()
4112 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1); in vcpu_post_run()
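/*
 * Editor's note - triage summary of the exit handling above:
 *   exit_reason == -EINTR   host machine check during SIE: reinject, go on
 *   icptcode > 0            run the intercept handler; if it returns
 *                           -EOPNOTSUPP, report KVM_EXIT_S390_SIEIC and
 *                           return -EREMOTE to userspace
 *   exit_reason != -EFAULT  spurious exit, resume the guest
 *   ucontrol VM             forward the fault as KVM_EXIT_S390_UCONTROL
 *   gmap_pfault set         fault the page in and resume
 */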
4121 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block; in __vcpu_run()
4124 * We try to hold kvm->srcu during most of vcpu_run (except when run- in __vcpu_run()
4127 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in __vcpu_run()
4134 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in __vcpu_run()
4144 memcpy(sie_page->pv_grregs, in __vcpu_run()
4145 vcpu->run->s.regs.gprs, in __vcpu_run()
4146 sizeof(sie_page->pv_grregs)); in __vcpu_run()
4148 exit_reason = sie64a(vcpu->arch.sie_block, in __vcpu_run()
4149 vcpu->run->s.regs.gprs); in __vcpu_run()
4151 memcpy(vcpu->run->s.regs.gprs, in __vcpu_run()
4152 sie_page->pv_grregs, in __vcpu_run()
4153 sizeof(sie_page->pv_grregs)); in __vcpu_run()
4156 * that leave the guest state in an "in-between" state in __vcpu_run()
4160 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR || in __vcpu_run()
4161 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) { in __vcpu_run()
4162 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; in __vcpu_run()
4169 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in __vcpu_run()
4174 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in __vcpu_run()
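/*
 * Editor's note (hedged): for protected (PV) guests the guest register
 * file lives in the sie_page's pv_grregs, so the memcpy pair above
 * shuttles gprs in and out around sie64a(); on the two PV intercepts that
 * leave the guest in an "in-between" state, interrupt-mask bits are
 * stripped from the PSW so nothing is injected mid-instruction.
 */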
4180 struct kvm_run *kvm_run = vcpu->run; in sync_regs_fmt2()
4184 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb; in sync_regs_fmt2()
4185 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb; in sync_regs_fmt2()
4186 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; in sync_regs_fmt2()
4187 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; in sync_regs_fmt2()
4188 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { in sync_regs_fmt2()
4189 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; in sync_regs_fmt2()
4190 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; in sync_regs_fmt2()
4191 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; in sync_regs_fmt2()
4193 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) { in sync_regs_fmt2()
4194 vcpu->arch.pfault_token = kvm_run->s.regs.pft; in sync_regs_fmt2()
4195 vcpu->arch.pfault_select = kvm_run->s.regs.pfs; in sync_regs_fmt2()
4196 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; in sync_regs_fmt2()
4197 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in sync_regs_fmt2()
4200 if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) { in sync_regs_fmt2()
4201 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318; in sync_regs_fmt2()
4202 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc; in sync_regs_fmt2()
4208 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) && in sync_regs_fmt2()
4209 test_kvm_facility(vcpu->kvm, 64) && in sync_regs_fmt2()
4210 riccb->v && in sync_regs_fmt2()
4211 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) { in sync_regs_fmt2()
4213 vcpu->arch.sie_block->ecb3 |= ECB3_RI; in sync_regs_fmt2()
4216 * If userspace sets the gscb (e.g. after migration) to non-zero, in sync_regs_fmt2()
4219 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) && in sync_regs_fmt2()
4220 test_kvm_facility(vcpu->kvm, 133) && in sync_regs_fmt2()
4221 gscb->gssm && in sync_regs_fmt2()
4222 !vcpu->arch.gs_enabled) { in sync_regs_fmt2()
4224 vcpu->arch.sie_block->ecb |= ECB_GS; in sync_regs_fmt2()
4225 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in sync_regs_fmt2()
4226 vcpu->arch.gs_enabled = 1; in sync_regs_fmt2()
4228 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) && in sync_regs_fmt2()
4229 test_kvm_facility(vcpu->kvm, 82)) { in sync_regs_fmt2()
4230 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in sync_regs_fmt2()
4231 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0; in sync_regs_fmt2()
4236 if (current->thread.gs_cb) { in sync_regs_fmt2()
4237 vcpu->arch.host_gscb = current->thread.gs_cb; in sync_regs_fmt2()
4238 save_gs_cb(vcpu->arch.host_gscb); in sync_regs_fmt2()
4240 if (vcpu->arch.gs_enabled) { in sync_regs_fmt2()
4241 current->thread.gs_cb = (struct gs_cb *) in sync_regs_fmt2()
4242 &vcpu->run->s.regs.gscb; in sync_regs_fmt2()
4243 restore_gs_cb(current->thread.gs_cb); in sync_regs_fmt2()
4252 struct kvm_run *kvm_run = vcpu->run; in sync_regs()
4254 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) in sync_regs()
4255 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); in sync_regs()
4256 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { in sync_regs()
4257 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); in sync_regs()
4261 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { in sync_regs()
4262 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm); in sync_regs()
4263 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; in sync_regs()
4265 save_access_regs(vcpu->arch.host_acrs); in sync_regs()
4266 restore_access_regs(vcpu->run->s.regs.acrs); in sync_regs()
4269 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc; in sync_regs()
4270 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs; in sync_regs()
4272 current->thread.fpu.regs = vcpu->run->s.regs.vrs; in sync_regs()
4274 current->thread.fpu.regs = vcpu->run->s.regs.fprs; in sync_regs()
4275 current->thread.fpu.fpc = vcpu->run->s.regs.fpc; in sync_regs()
4276 if (test_fp_ctl(current->thread.fpu.fpc)) in sync_regs()
4278 current->thread.fpu.fpc = 0; in sync_regs()
4293 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC; in sync_regs()
4294 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask & in sync_regs()
4298 kvm_run->kvm_dirty_regs = 0; in sync_regs()
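/*
 * Illustrative userspace sketch (editorial, not part of this file): the
 * kvm_dirty_regs protocol consumed by sync_regs() above. Assumes a mapped
 * struct kvm_run and <linux/kvm.h>; error handling elided.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int run_with_new_prefix(int vcpu_fd, struct kvm_run *run, __u64 prefix)
{
	run->s.regs.prefix = prefix;
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;	/* picked up in sync_regs() */

	return ioctl(vcpu_fd, KVM_RUN, 0);
}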
4303 struct kvm_run *kvm_run = vcpu->run; in store_regs_fmt2()
4305 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; in store_regs_fmt2()
4306 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; in store_regs_fmt2()
4307 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; in store_regs_fmt2()
4308 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC; in store_regs_fmt2()
4309 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val; in store_regs_fmt2()
4312 if (vcpu->arch.gs_enabled) in store_regs_fmt2()
4313 save_gs_cb(current->thread.gs_cb); in store_regs_fmt2()
4315 current->thread.gs_cb = vcpu->arch.host_gscb; in store_regs_fmt2()
4316 restore_gs_cb(vcpu->arch.host_gscb); in store_regs_fmt2()
4318 if (!vcpu->arch.host_gscb) in store_regs_fmt2()
4320 vcpu->arch.host_gscb = NULL; in store_regs_fmt2()
4327 struct kvm_run *kvm_run = vcpu->run; in store_regs()
4329 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; in store_regs()
4330 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; in store_regs()
4331 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); in store_regs()
4332 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); in store_regs()
4333 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu); in store_regs()
4334 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; in store_regs()
4335 kvm_run->s.regs.pft = vcpu->arch.pfault_token; in store_regs()
4336 kvm_run->s.regs.pfs = vcpu->arch.pfault_select; in store_regs()
4337 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; in store_regs()
4338 save_access_regs(vcpu->run->s.regs.acrs); in store_regs()
4339 restore_access_regs(vcpu->arch.host_acrs); in store_regs()
4342 vcpu->run->s.regs.fpc = current->thread.fpu.fpc; in store_regs()
4344 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; in store_regs()
4345 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; in store_regs()
4352 struct kvm_run *kvm_run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
4355 if (kvm_run->immediate_exit) in kvm_arch_vcpu_ioctl_run()
4356 return -EINTR; in kvm_arch_vcpu_ioctl_run()
4358 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS || in kvm_arch_vcpu_ioctl_run()
4359 kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS) in kvm_arch_vcpu_ioctl_run()
4360 return -EINVAL; in kvm_arch_vcpu_ioctl_run()
4376 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
4380 vcpu->vcpu_id); in kvm_arch_vcpu_ioctl_run()
4381 rc = -EINVAL; in kvm_arch_vcpu_ioctl_run()
4392 kvm_run->exit_reason = KVM_EXIT_INTR; in kvm_arch_vcpu_ioctl_run()
4393 rc = -EINTR; in kvm_arch_vcpu_ioctl_run()
4401 if (rc == -EREMOTE) { in kvm_arch_vcpu_ioctl_run()
4411 vcpu->stat.exit_userspace++; in kvm_arch_vcpu_ioctl_run()
4420 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4421 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
4434 return -EFAULT; in kvm_s390_store_status_unloaded()
4438 return -EFAULT; in kvm_s390_store_status_unloaded()
4441 gpa -= __LC_FPREGS_SAVE_AREA; in kvm_s390_store_status_unloaded()
4445 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs); in kvm_s390_store_status_unloaded()
4450 vcpu->run->s.regs.fprs, 128); in kvm_s390_store_status_unloaded()
4453 vcpu->run->s.regs.gprs, 128); in kvm_s390_store_status_unloaded()
4455 &vcpu->arch.sie_block->gpsw, 16); in kvm_s390_store_status_unloaded()
4459 &vcpu->run->s.regs.fpc, 4); in kvm_s390_store_status_unloaded()
4461 &vcpu->arch.sie_block->todpr, 4); in kvm_s390_store_status_unloaded()
4465 clkcomp = vcpu->arch.sie_block->ckc >> 8; in kvm_s390_store_status_unloaded()
4469 &vcpu->run->s.regs.acrs, 64); in kvm_s390_store_status_unloaded()
4471 &vcpu->arch.sie_block->gcr, 128); in kvm_s390_store_status_unloaded()
4472 return rc ? -EFAULT : 0; in kvm_s390_store_status_unloaded()
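/*
 * Editor's note: the helper above writes the architected store-status
 * image - each register class goes to its fixed save-area offset
 * (__LC_*_SAVE_AREA), the clock comparator is stored shifted right by
 * 8 bits, and FPRs are synthesized from the vector registers when the
 * vector facility is in use.
 */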
4483 vcpu->run->s.regs.fpc = current->thread.fpu.fpc; in kvm_s390_vcpu_store_status()
4484 save_access_regs(vcpu->run->s.regs.acrs); in kvm_s390_vcpu_store_status()
4520 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); in kvm_s390_vcpu_start()
4522 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
4523 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_start()
4529 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
4535 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) in kvm_s390_vcpu_start()
4540 /* we're the only active VCPU -> speed it up */ in kvm_s390_vcpu_start()
4548 __disable_ibs_on_all_vcpus(vcpu->kvm); in kvm_s390_vcpu_start()
4558 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; in kvm_s390_vcpu_start()
4564 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
4576 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); in kvm_s390_vcpu_stop()
4578 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
4579 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_stop()
4585 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
4597 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { in kvm_s390_vcpu_stop()
4599 started_vcpu = vcpu->kvm->vcpus[i]; in kvm_s390_vcpu_stop()
4611 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
4620 if (cap->flags) in kvm_vcpu_ioctl_enable_cap()
4621 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
4623 switch (cap->cap) { in kvm_vcpu_ioctl_enable_cap()
4625 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
4626 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
4627 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support"); in kvm_vcpu_ioctl_enable_cap()
4628 trace_kvm_s390_enable_css(vcpu->kvm); in kvm_vcpu_ioctl_enable_cap()
4633 r = -EINVAL; in kvm_vcpu_ioctl_enable_cap()
4642 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_guest_sida_op()
4645 if (mop->flags || !mop->size) in kvm_s390_guest_sida_op()
4646 return -EINVAL; in kvm_s390_guest_sida_op()
4647 if (mop->size + mop->sida_offset < mop->size) in kvm_s390_guest_sida_op()
4648 return -EINVAL; in kvm_s390_guest_sida_op()
4649 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block)) in kvm_s390_guest_sida_op()
4650 return -E2BIG; in kvm_s390_guest_sida_op()
4652 switch (mop->op) { in kvm_s390_guest_sida_op()
4654 if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) + in kvm_s390_guest_sida_op()
4655 mop->sida_offset), mop->size)) in kvm_s390_guest_sida_op()
4656 r = -EFAULT; in kvm_s390_guest_sida_op()
4660 if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) + in kvm_s390_guest_sida_op()
4661 mop->sida_offset), uaddr, mop->size)) in kvm_s390_guest_sida_op()
4662 r = -EFAULT; in kvm_s390_guest_sida_op()
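/*
 * Editor's note: "size + sida_offset < size" above is the usual unsigned
 * wraparound guard - e.g. size = 0x10 with sida_offset = -8 (as u64)
 * wraps past zero and compares smaller than size, so the request is
 * rejected before the sida_size() bounds check could be fooled.
 */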
4670 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_guest_mem_op()
4676 if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size) in kvm_s390_guest_mem_op()
4677 return -EINVAL; in kvm_s390_guest_mem_op()
4679 if (mop->size > MEM_OP_MAX_SIZE) in kvm_s390_guest_mem_op()
4680 return -E2BIG; in kvm_s390_guest_mem_op()
4683 return -EINVAL; in kvm_s390_guest_mem_op()
4685 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { in kvm_s390_guest_mem_op()
4686 tmpbuf = vmalloc(mop->size); in kvm_s390_guest_mem_op()
4688 return -ENOMEM; in kvm_s390_guest_mem_op()
4691 switch (mop->op) { in kvm_s390_guest_mem_op()
4693 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { in kvm_s390_guest_mem_op()
4694 r = check_gva_range(vcpu, mop->gaddr, mop->ar, in kvm_s390_guest_mem_op()
4695 mop->size, GACC_FETCH); in kvm_s390_guest_mem_op()
4698 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); in kvm_s390_guest_mem_op()
4700 if (copy_to_user(uaddr, tmpbuf, mop->size)) in kvm_s390_guest_mem_op()
4701 r = -EFAULT; in kvm_s390_guest_mem_op()
4705 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { in kvm_s390_guest_mem_op()
4706 r = check_gva_range(vcpu, mop->gaddr, mop->ar, in kvm_s390_guest_mem_op()
4707 mop->size, GACC_STORE); in kvm_s390_guest_mem_op()
4710 if (copy_from_user(tmpbuf, uaddr, mop->size)) { in kvm_s390_guest_mem_op()
4711 r = -EFAULT; in kvm_s390_guest_mem_op()
4714 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); in kvm_s390_guest_mem_op()
4718 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0) in kvm_s390_guest_mem_op()
4719 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); in kvm_s390_guest_mem_op()
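/*
 * Illustrative userspace sketch (editorial, not part of this file):
 * driving the path above through the KVM_S390_MEM_OP vcpu ioctl, here a
 * plain logical read with no flags. Assumes the s390 UAPI from
 * <linux/kvm.h>; error handling elided.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int read_guest_logical(int vcpu_fd, __u64 gaddr, void *buf, __u32 len)
{
	struct kvm_s390_mem_op op = {
		.gaddr = gaddr,
		.size  = len,
		.op    = KVM_S390_MEMOP_LOGICAL_READ,
		.buf   = (__u64)(unsigned long)buf,
		.ar    = 0,	/* address space: access register 0 */
	};

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}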
4730 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_guest_memsida_op()
4732 switch (mop->op) { in kvm_s390_guest_memsida_op()
4739 /* we are locked against sida going away by the vcpu->mutex */ in kvm_s390_guest_memsida_op()
4743 r = -EINVAL; in kvm_s390_guest_memsida_op()
4746 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvm_s390_guest_memsida_op()
4753 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl()
4761 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
4769 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
4771 return -EINVAL; in kvm_arch_vcpu_async_ioctl()
4775 return -ENOIOCTLCMD; in kvm_arch_vcpu_async_ioctl()
4781 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
4791 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4793 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4798 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4838 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4841 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4855 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4859 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
4860 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4864 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, in kvm_arch_vcpu_ioctl()
4872 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4876 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
4877 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4881 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, in kvm_arch_vcpu_ioctl()
4887 r = gmap_fault(vcpu->arch.gmap, arg, 0); in kvm_arch_vcpu_ioctl()
4893 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4905 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4911 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4917 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4929 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4933 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4943 r = -ENOTTY; in kvm_arch_vcpu_ioctl()
4953 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) in kvm_arch_vcpu_fault()
4954 && (kvm_is_ucontrol(vcpu->kvm))) { in kvm_arch_vcpu_fault()
4955 vmf->page = virt_to_page(vcpu->arch.sie_block); in kvm_arch_vcpu_fault()
4956 get_page(vmf->page); in kvm_arch_vcpu_fault()
4974 if (mem->userspace_addr & 0xffffful) in kvm_arch_prepare_memory_region()
4975 return -EINVAL; in kvm_arch_prepare_memory_region()
4977 if (mem->memory_size & 0xffffful) in kvm_arch_prepare_memory_region()
4978 return -EINVAL; in kvm_arch_prepare_memory_region()
4980 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit) in kvm_arch_prepare_memory_region()
4981 return -EINVAL; in kvm_arch_prepare_memory_region()
4985 return -EINVAL; in kvm_arch_prepare_memory_region()
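/*
 * Editor's note: the 0xffffful masks above enforce 1 MiB (segment)
 * alignment for the userspace address and the slot size, and the third
 * check keeps the slot below the configured guest memory limit - guest
 * storage is mapped by the gmap at segment granularity.
 */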
4999 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5000 old->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
5003 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5004 old->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
5009 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, in kvm_arch_commit_memory_region()
5010 mem->guest_phys_addr, mem->memory_size); in kvm_arch_commit_memory_region()
5031 vcpu->valid_wakeup = false; in kvm_arch_vcpu_block_finish()
5040 return -ENODEV; in kvm_s390_init()
5045 return -EINVAL; in kvm_s390_init()