Lines matching refs:arch in arch/s390/kvm/kvm-s390.c (each entry: source line number, matched text, enclosing function)
265 kvm_clock_sync_scb(vcpu->arch.sie_block, *delta); in kvm_clock_sync()
267 kvm->arch.epoch = vcpu->arch.sie_block->epoch; in kvm_clock_sync()
268 kvm->arch.epdx = vcpu->arch.sie_block->epdx; in kvm_clock_sync()
270 if (vcpu->arch.cputm_enabled) in kvm_clock_sync()
271 vcpu->arch.cputm_start += *delta; in kvm_clock_sync()
272 if (vcpu->arch.vsie_block) in kvm_clock_sync()
273 kvm_clock_sync_scb(vcpu->arch.vsie_block, in kvm_clock_sync()
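
The matches at lines 265-273 are kvm_clock_sync() pushing a host TOD delta into every vCPU's SIE block (and any active vSIE shadow block), and adjusting the VM-wide epoch and the running CPU timer. Below is a minimal userspace sketch of the 128-bit carry arithmetic; the struct is illustrative, not the real SIE block layout:

    #include <stdint.h>
    #include <stdio.h>

    struct scb_model {
        uint64_t epoch;  /* low 64 bits of the guest epoch */
        uint8_t  epdx;   /* epoch index byte (multi-epoch facility) */
    };

    /* The host TOD jumped by delta; add -delta to the guest epoch so
     * the guest-visible TOD stays stable, carrying into the index. */
    static void clock_sync_scb(struct scb_model *scb, uint64_t delta)
    {
        uint8_t delta_idx = 0;

        delta = -delta;
        if ((int64_t)delta < 0)      /* sign-extend into the index byte */
            delta_idx = -1;

        scb->epoch += delta;
        scb->epdx  += delta_idx;
        if (scb->epoch < delta)      /* 64-bit carry */
            scb->epdx += 1;
    }

    int main(void)
    {
        struct scb_model scb = { .epoch = (uint64_t)-8, .epdx = 0 };

        clock_sync_scb(&scb, (uint64_t)-16);  /* host clock jumped back 16 */
        printf("epoch=%016llx epdx=%02x\n",
               (unsigned long long)scb.epoch, scb.epdx);
        return 0;
    }
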
528 struct gmap *gmap = kvm->arch.gmap; in kvm_s390_sync_dirty_log()
619 kvm->arch.use_irqchip = 1; in kvm_vm_ioctl_enable_cap()
624 kvm->arch.user_sigp = 1; in kvm_vm_ioctl_enable_cap()
632 set_kvm_facility(kvm->arch.model.fac_mask, 129); in kvm_vm_ioctl_enable_cap()
633 set_kvm_facility(kvm->arch.model.fac_list, 129); in kvm_vm_ioctl_enable_cap()
635 set_kvm_facility(kvm->arch.model.fac_mask, 134); in kvm_vm_ioctl_enable_cap()
636 set_kvm_facility(kvm->arch.model.fac_list, 134); in kvm_vm_ioctl_enable_cap()
639 set_kvm_facility(kvm->arch.model.fac_mask, 135); in kvm_vm_ioctl_enable_cap()
640 set_kvm_facility(kvm->arch.model.fac_list, 135); in kvm_vm_ioctl_enable_cap()
655 set_kvm_facility(kvm->arch.model.fac_mask, 64); in kvm_vm_ioctl_enable_cap()
656 set_kvm_facility(kvm->arch.model.fac_list, 64); in kvm_vm_ioctl_enable_cap()
668 set_kvm_facility(kvm->arch.model.fac_mask, 72); in kvm_vm_ioctl_enable_cap()
669 set_kvm_facility(kvm->arch.model.fac_list, 72); in kvm_vm_ioctl_enable_cap()
682 set_kvm_facility(kvm->arch.model.fac_mask, 133); in kvm_vm_ioctl_enable_cap()
683 set_kvm_facility(kvm->arch.model.fac_list, 133); in kvm_vm_ioctl_enable_cap()
694 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_enable_cap()
706 kvm->arch.use_skf = 0; in kvm_vm_ioctl_enable_cap()
707 kvm->arch.use_pfmfi = 0; in kvm_vm_ioctl_enable_cap()
715 kvm->arch.user_stsi = 1; in kvm_vm_ioctl_enable_cap()
720 kvm->arch.user_instr0 = 1; in kvm_vm_ioctl_enable_cap()
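
The long run of set_kvm_facility() pairs above (lines 632-683) enables a facility in both the mask (what the host will back) and the list (what the guest initially sees). STFLE facility numbering is MSB-first within each 64-bit word, so the helper presumably reduces to the bit manipulation below; a standalone sketch, not the kernel's definition:

    #include <stdint.h>
    #include <stdio.h>

    /* Facility nr lives in 64-bit word nr/64, counted from the MSB. */
    static void set_facility(uint64_t *fac, unsigned int nr)
    {
        fac[nr >> 6] |= 1ULL << (63 - (nr & 63));
    }

    static int test_facility_bit(const uint64_t *fac, unsigned int nr)
    {
        return !!(fac[nr >> 6] & (1ULL << (63 - (nr & 63))));
    }

    int main(void)
    {
        uint64_t fac_mask[4] = { 0 }, fac_list[4] = { 0 };

        /* KVM_CAP_S390_VECTOR enables facility 129 in mask and list */
        set_facility(fac_mask, 129);
        set_facility(fac_list, 129);
        printf("word2=%016llx set=%d\n",
               (unsigned long long)fac_list[2],
               test_facility_bit(fac_list, 129));
        return 0;
    }
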
739 kvm->arch.mem_limit); in kvm_s390_get_mem_control()
740 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr)) in kvm_s390_get_mem_control()
767 kvm->arch.use_cmma = 1; in kvm_s390_set_mem_control()
769 kvm->arch.use_pfmfi = 0; in kvm_s390_set_mem_control()
779 if (!kvm->arch.use_cmma) in kvm_s390_set_mem_control()
785 s390_reset_cmma(kvm->arch.gmap->mm); in kvm_s390_set_mem_control()
799 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT && in kvm_s390_set_mem_control()
800 new_limit > kvm->arch.mem_limit) in kvm_s390_set_mem_control()
819 gmap_remove(kvm->arch.gmap); in kvm_s390_set_mem_control()
821 kvm->arch.gmap = new; in kvm_s390_set_mem_control()
828 (void *) kvm->arch.gmap->asce); in kvm_s390_set_mem_control()
862 kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_vm_set_crypto()
863 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
864 kvm->arch.crypto.aes_kw = 1; in kvm_s390_vm_set_crypto()
869 kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_vm_set_crypto()
870 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
871 kvm->arch.crypto.dea_kw = 1; in kvm_s390_vm_set_crypto()
875 kvm->arch.crypto.aes_kw = 0; in kvm_s390_vm_set_crypto()
876 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
877 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
881 kvm->arch.crypto.dea_kw = 0; in kvm_s390_vm_set_crypto()
882 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
883 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
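
Lines 862-883 show the symmetric enable/disable pattern for AES and DEA key wrapping: enabling installs a freshly generated wrapping key mask, disabling clears the flag and wipes the mask. A userspace sketch of the pattern; getrandom() stands in for the kernel's get_random_bytes(), and the mask size is illustrative:

    #include <string.h>
    #include <sys/random.h>

    struct crycb_model {
        unsigned char aes_wrapping_key_mask[32];  /* size illustrative */
    };

    static void set_aes_kw(struct crycb_model *crycb, int *aes_kw, int on)
    {
        if (on) {
            (void)getrandom(crycb->aes_wrapping_key_mask,
                            sizeof(crycb->aes_wrapping_key_mask), 0);
            *aes_kw = 1;
        } else {
            *aes_kw = 0;
            memset(crycb->aes_wrapping_key_mask, 0,
                   sizeof(crycb->aes_wrapping_key_mask));
        }
    }
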
917 if (kvm->arch.migration_mode) in kvm_s390_vm_start_migration()
923 if (!kvm->arch.use_cmma) { in kvm_s390_vm_start_migration()
924 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
939 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages); in kvm_s390_vm_start_migration()
940 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
952 if (!kvm->arch.migration_mode) in kvm_s390_vm_stop_migration()
954 kvm->arch.migration_mode = 0; in kvm_s390_vm_stop_migration()
955 if (kvm->arch.use_cmma) in kvm_s390_vm_stop_migration()
984 u64 mig = kvm->arch.migration_mode; in kvm_s390_vm_get_migration()
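
The migration matches (lines 917-984) implement KVM_S390_VM_MIGRATION: with CMMA in use, starting migration marks all guest pages dirty and seeds cmma_dirty_pages, which kvm_s390_get_cmma() (line 1733) decrements as userspace drains the log. A reduced model of the bookkeeping, omitting the memslot walk:

    #include <stdatomic.h>
    #include <stdint.h>

    struct mig_model {
        int migration_mode;
        int use_cmma;
        atomic_uint_fast64_t cmma_dirty_pages;
    };

    static int start_migration(struct mig_model *m, uint64_t ram_pages)
    {
        if (m->migration_mode)
            return 0;                  /* already enabled */
        if (!m->use_cmma) {
            m->migration_mode = 1;     /* nothing to track */
            return 0;
        }
        /* every page starts out dirty from the guest's point of view */
        atomic_store(&m->cmma_dirty_pages, ram_pages);
        m->migration_mode = 1;
        return 0;
    }

    static void stop_migration(struct mig_model *m)
    {
        m->migration_mode = 0;
    }
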
1072 gtod->tod = htod.tod + kvm->arch.epoch; in kvm_s390_get_tod_clock()
1075 gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx; in kvm_s390_get_tod_clock()
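
The two lines from kvm_s390_get_tod_clock() (1072, 1075) compute the guest's extended TOD as host TOD plus the stored epoch, with the epoch index byte absorbing the carry when the low 64 bits wrap. The arithmetic, modeled standalone:

    #include <stdint.h>

    struct tod_ext {
        uint8_t  epoch_idx;  /* high byte of the 128-bit extended TOD */
        uint64_t tod;        /* low 64 bits */
    };

    static struct tod_ext guest_tod(struct tod_ext host,
                                    uint64_t epoch, uint8_t epdx)
    {
        struct tod_ext g;

        g.tod = host.tod + epoch;
        g.epoch_idx = host.epoch_idx + epdx;
        if (g.tod < host.tod)    /* low 64 bits wrapped: carry up */
            g.epoch_idx += 1;
        return g;
    }
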
1163 kvm->arch.model.cpuid = proc->cpuid; in kvm_s390_set_processor()
1168 kvm->arch.model.ibc = unblocked_ibc; in kvm_s390_set_processor()
1170 kvm->arch.model.ibc = lowest_ibc; in kvm_s390_set_processor()
1172 kvm->arch.model.ibc = proc->ibc; in kvm_s390_set_processor()
1174 memcpy(kvm->arch.model.fac_list, proc->fac_list, in kvm_s390_set_processor()
1177 kvm->arch.model.ibc, in kvm_s390_set_processor()
1178 kvm->arch.model.cpuid); in kvm_s390_set_processor()
1180 kvm->arch.model.fac_list[0], in kvm_s390_set_processor()
1181 kvm->arch.model.fac_list[1], in kvm_s390_set_processor()
1182 kvm->arch.model.fac_list[2]); in kvm_s390_set_processor()
1208 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat, in kvm_s390_set_processor_feat()
1256 proc->cpuid = kvm->arch.model.cpuid; in kvm_s390_get_processor()
1257 proc->ibc = kvm->arch.model.ibc; in kvm_s390_get_processor()
1258 memcpy(&proc->fac_list, kvm->arch.model.fac_list, in kvm_s390_get_processor()
1261 kvm->arch.model.ibc, in kvm_s390_get_processor()
1262 kvm->arch.model.cpuid); in kvm_s390_get_processor()
1264 kvm->arch.model.fac_list[0], in kvm_s390_get_processor()
1265 kvm->arch.model.fac_list[1], in kvm_s390_get_processor()
1266 kvm->arch.model.fac_list[2]); in kvm_s390_get_processor()
1286 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, in kvm_s390_get_machine()
1291 kvm->arch.model.ibc, in kvm_s390_get_machine()
1292 kvm->arch.model.cpuid); in kvm_s390_get_machine()
1313 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat, in kvm_s390_get_processor_feat()
1733 atomic64_dec(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma()
1774 if (!kvm->arch.use_cmma) in kvm_s390_get_cmma_bits()
1781 if (!peek && !kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
1790 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) { in kvm_s390_get_cmma_bits()
1808 if (kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
1809 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma_bits()
1834 if (!kvm->arch.use_cmma) in kvm_s390_set_cmma_bits()
1913 if (kvm->arch.use_irqchip) { in kvm_arch_vm_ioctl()
2036 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; in kvm_s390_set_crycb_format()
2039 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; in kvm_s390_set_crycb_format()
2041 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; in kvm_s390_set_crycb_format()
2058 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; in kvm_s390_crypto_init()
2062 kvm->arch.crypto.aes_kw = 1; in kvm_s390_crypto_init()
2063 kvm->arch.crypto.dea_kw = 1; in kvm_s390_crypto_init()
2064 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_crypto_init()
2065 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_crypto_init()
2066 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_crypto_init()
2067 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_crypto_init()
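
kvm_s390_set_crycb_format() (lines 2036-2041) packs the CRYCB origin and its format into the single 32-bit designation the SIE block consumes; the block's alignment leaves the low bits free for the format. A sketch, with the format values as I recall them from the kernel headers of this era (FORMAT1 = 1, FORMAT2 = 3 when APXA is installed):

    #include <stdint.h>

    #define CRYCB_FORMAT1 0x00000001u  /* values assumed, see above */
    #define CRYCB_FORMAT2 0x00000003u

    static uint32_t crycb_designation(void *crycb, int apxa_installed)
    {
        uint32_t crycbd = (uint32_t)(uintptr_t)crycb;

        return crycbd | (apxa_installed ? CRYCB_FORMAT2 : CRYCB_FORMAT1);
    }
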
2072 if (kvm->arch.use_esca) in sca_dispose()
2073 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block)); in sca_dispose()
2075 free_page((unsigned long)(kvm->arch.sca)); in sca_dispose()
2076 kvm->arch.sca = NULL; in sca_dispose()
2105 rwlock_init(&kvm->arch.sca_lock); in kvm_arch_init_vm()
2107 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); in kvm_arch_init_vm()
2108 if (!kvm->arch.sca) in kvm_arch_init_vm()
2114 kvm->arch.sca = (struct bsca_block *) in kvm_arch_init_vm()
2115 ((char *) kvm->arch.sca + sca_offset); in kvm_arch_init_vm()
2120 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); in kvm_arch_init_vm()
2121 if (!kvm->arch.dbf) in kvm_arch_init_vm()
2125 kvm->arch.sie_page2 = in kvm_arch_init_vm()
2127 if (!kvm->arch.sie_page2) in kvm_arch_init_vm()
2130 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; in kvm_arch_init_vm()
2133 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] & in kvm_arch_init_vm()
2136 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] & in kvm_arch_init_vm()
2141 set_kvm_facility(kvm->arch.model.fac_mask, 138); in kvm_arch_init_vm()
2142 set_kvm_facility(kvm->arch.model.fac_list, 138); in kvm_arch_init_vm()
2144 set_kvm_facility(kvm->arch.model.fac_mask, 74); in kvm_arch_init_vm()
2145 set_kvm_facility(kvm->arch.model.fac_list, 74); in kvm_arch_init_vm()
2147 set_kvm_facility(kvm->arch.model.fac_mask, 147); in kvm_arch_init_vm()
2148 set_kvm_facility(kvm->arch.model.fac_list, 147); in kvm_arch_init_vm()
2151 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); in kvm_arch_init_vm()
2152 kvm->arch.model.ibc = sclp.ibc & 0x0fff; in kvm_arch_init_vm()
2156 mutex_init(&kvm->arch.float_int.ais_lock); in kvm_arch_init_vm()
2157 spin_lock_init(&kvm->arch.float_int.lock); in kvm_arch_init_vm()
2159 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); in kvm_arch_init_vm()
2160 init_waitqueue_head(&kvm->arch.ipte_wq); in kvm_arch_init_vm()
2161 mutex_init(&kvm->arch.ipte_mutex); in kvm_arch_init_vm()
2163 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); in kvm_arch_init_vm()
2167 kvm->arch.gmap = NULL; in kvm_arch_init_vm()
2168 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT; in kvm_arch_init_vm()
2171 kvm->arch.mem_limit = TASK_SIZE_MAX; in kvm_arch_init_vm()
2173 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX, in kvm_arch_init_vm()
2175 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1); in kvm_arch_init_vm()
2176 if (!kvm->arch.gmap) in kvm_arch_init_vm()
2178 kvm->arch.gmap->private = kvm; in kvm_arch_init_vm()
2179 kvm->arch.gmap->pfault_enabled = 0; in kvm_arch_init_vm()
2182 kvm->arch.use_pfmfi = sclp.has_pfmfi; in kvm_arch_init_vm()
2183 kvm->arch.use_skf = sclp.has_skey; in kvm_arch_init_vm()
2184 spin_lock_init(&kvm->arch.start_stop_lock); in kvm_arch_init_vm()
2191 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_init_vm()
2192 debug_unregister(kvm->arch.dbf); in kvm_arch_init_vm()
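
Inside kvm_arch_init_vm(), the truncated pair at lines 2133 and 2136 is the loop that derives the CPU model from the host's STFLE result: fac_mask keeps everything the host can back (base plus extension facilities), while fac_list, the guest's initial view, keeps only the base set. A sketch with stand-in tables for the kernel's kvm_s390_fac_base/_ext:

    #include <stdint.h>

    #define FAC_WORDS 4  /* stand-in for the kernel's list size */

    static void init_cpu_model(const uint64_t *host_stfle,
                               const uint64_t *fac_base,
                               const uint64_t *fac_ext,
                               uint64_t *fac_mask, uint64_t *fac_list)
    {
        for (int i = 0; i < FAC_WORDS; i++) {
            fac_mask[i] = host_stfle[i] & (fac_base[i] | fac_ext[i]);
            fac_list[i] = host_stfle[i] & fac_base[i];
        }
    }
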
2218 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_destroy()
2220 if (vcpu->kvm->arch.use_cmma) in kvm_arch_vcpu_destroy()
2222 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_destroy()
2248 debug_unregister(kvm->arch.dbf); in kvm_arch_destroy_vm()
2250 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_destroy_vm()
2252 gmap_remove(kvm->arch.gmap); in kvm_arch_destroy_vm()
2262 vcpu->arch.gmap = gmap_create(current->mm, -1UL); in __kvm_ucontrol_vcpu_init()
2263 if (!vcpu->arch.gmap) in __kvm_ucontrol_vcpu_init()
2265 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
2274 read_lock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
2275 if (vcpu->kvm->arch.use_esca) { in sca_del_vcpu()
2276 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
2281 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
2286 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
2292 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2295 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); in sca_add_vcpu()
2296 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; in sca_add_vcpu()
2299 read_lock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
2300 if (vcpu->kvm->arch.use_esca) { in sca_add_vcpu()
2301 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2303 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block; in sca_add_vcpu()
2304 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); in sca_add_vcpu()
2305 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU; in sca_add_vcpu()
2306 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_add_vcpu()
2309 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2311 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block; in sca_add_vcpu()
2312 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); in sca_add_vcpu()
2313 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; in sca_add_vcpu()
2316 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
2339 struct bsca_block *old_sca = kvm->arch.sca; in sca_switch_to_extended()
2353 write_lock(&kvm->arch.sca_lock); in sca_switch_to_extended()
2358 vcpu->arch.sie_block->scaoh = scaoh; in sca_switch_to_extended()
2359 vcpu->arch.sie_block->scaol = scaol; in sca_switch_to_extended()
2360 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_switch_to_extended()
2362 kvm->arch.sca = new_sca; in sca_switch_to_extended()
2363 kvm->arch.use_esca = 1; in sca_switch_to_extended()
2365 write_unlock(&kvm->arch.sca_lock); in sca_switch_to_extended()
2371 old_sca, kvm->arch.sca); in sca_switch_to_extended()
2390 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm); in sca_can_add_vcpu()
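
The SCA matches (lines 2274-2390) manage the system control area SIE uses for inter-vCPU signaling: the 64-bit SCA origin is stored as the two 32-bit SIE fields scaoh/scaol, and sca_switch_to_extended() rewrites every vCPU's pair under the write lock once the basic SCA runs out of entries. The split itself, as a sketch:

    #include <stdint.h>

    /* For the extended SCA the low 6 bits of scaol are masked out (the
     * block is aligned, so they are zero anyway). */
    static void set_sca_origin(uint32_t *scaoh, uint32_t *scaol,
                               uint64_t sca, int esca)
    {
        *scaoh = (uint32_t)(sca >> 32);
        *scaol = esca ? ((uint32_t)sca & ~0x3fu) : (uint32_t)sca;
    }
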
2398 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_init()
2432 WARN_ON_ONCE(vcpu->arch.cputm_start != 0); in __start_cpu_timer_accounting()
2433 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
2434 vcpu->arch.cputm_start = get_tod_clock_fast(); in __start_cpu_timer_accounting()
2435 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
2441 WARN_ON_ONCE(vcpu->arch.cputm_start == 0); in __stop_cpu_timer_accounting()
2442 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
2443 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start; in __stop_cpu_timer_accounting()
2444 vcpu->arch.cputm_start = 0; in __stop_cpu_timer_accounting()
2445 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
2451 WARN_ON_ONCE(vcpu->arch.cputm_enabled); in __enable_cpu_timer_accounting()
2452 vcpu->arch.cputm_enabled = true; in __enable_cpu_timer_accounting()
2459 WARN_ON_ONCE(!vcpu->arch.cputm_enabled); in __disable_cpu_timer_accounting()
2461 vcpu->arch.cputm_enabled = false; in __disable_cpu_timer_accounting()
2482 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
2483 if (vcpu->arch.cputm_enabled) in kvm_s390_set_cpu_timer()
2484 vcpu->arch.cputm_start = get_tod_clock_fast(); in kvm_s390_set_cpu_timer()
2485 vcpu->arch.sie_block->cputm = cputm; in kvm_s390_set_cpu_timer()
2486 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
2496 if (unlikely(!vcpu->arch.cputm_enabled)) in kvm_s390_get_cpu_timer()
2497 return vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
2501 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount); in kvm_s390_get_cpu_timer()
2507 value = vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
2509 if (likely(vcpu->arch.cputm_start)) in kvm_s390_get_cpu_timer()
2510 value -= get_tod_clock_fast() - vcpu->arch.cputm_start; in kvm_s390_get_cpu_timer()
2511 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1)); in kvm_s390_get_cpu_timer()
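
Lines 2432-2511 are the lockless CPU timer: the writer (the vCPU thread) brackets updates of cputm/cputm_start with a sequence count, and readers retry until they observe an even, unchanged sequence. A self-contained model on C11 atomics; the kernel's seqcount_t API supplies the memory ordering that the plain loads gloss over here:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <time.h>

    struct cputm_model {
        atomic_uint seq;
        uint64_t cputm;        /* timer value as of cputm_start */
        uint64_t cputm_start;  /* 0 while accounting is stopped */
    };

    static uint64_t now(void)  /* stand-in for get_tod_clock_fast() */
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000u + (uint64_t)ts.tv_nsec;
    }

    static void set_cpu_timer(struct cputm_model *t, uint64_t cputm)
    {
        atomic_fetch_add(&t->seq, 1);   /* odd: update in progress */
        if (t->cputm_start)
            t->cputm_start = now();     /* restart the reference point */
        t->cputm = cputm;
        atomic_fetch_add(&t->seq, 1);   /* even: update done */
    }

    static uint64_t get_cpu_timer(struct cputm_model *t)
    {
        unsigned int s0, s1;
        uint64_t value;

        do {
            s0 = atomic_load(&t->seq);
            value = t->cputm;
            if (t->cputm_start)         /* running: subtract elapsed */
                value -= now() - t->cputm_start;
            s1 = atomic_load(&t->seq);
        } while ((s0 & 1) || s0 != s1);
        return value;
    }
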
2519 gmap_enable(vcpu->arch.enabled_gmap); in kvm_arch_vcpu_load()
2521 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_load()
2529 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_put()
2532 vcpu->arch.enabled_gmap = gmap_get_enabled(); in kvm_arch_vcpu_put()
2533 gmap_disable(vcpu->arch.enabled_gmap); in kvm_arch_vcpu_put()
2540 vcpu->arch.sie_block->gpsw.mask = 0UL; in kvm_s390_vcpu_initial_reset()
2541 vcpu->arch.sie_block->gpsw.addr = 0UL; in kvm_s390_vcpu_initial_reset()
2544 vcpu->arch.sie_block->ckc = 0UL; in kvm_s390_vcpu_initial_reset()
2545 vcpu->arch.sie_block->todpr = 0; in kvm_s390_vcpu_initial_reset()
2546 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64)); in kvm_s390_vcpu_initial_reset()
2547 vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 | in kvm_s390_vcpu_initial_reset()
2550 vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 | in kvm_s390_vcpu_initial_reset()
2556 vcpu->arch.sie_block->gbea = 1; in kvm_s390_vcpu_initial_reset()
2557 vcpu->arch.sie_block->pp = 0; in kvm_s390_vcpu_initial_reset()
2558 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in kvm_s390_vcpu_initial_reset()
2559 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_s390_vcpu_initial_reset()
2570 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
2571 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; in kvm_arch_vcpu_postcreate()
2575 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
2578 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0) in kvm_arch_vcpu_postcreate()
2579 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_arch_vcpu_postcreate()
2581 vcpu->arch.enabled_gmap = vcpu->arch.gmap; in kvm_arch_vcpu_postcreate()
2589 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); in kvm_s390_vcpu_crypto_setup()
2591 if (vcpu->kvm->arch.crypto.aes_kw) in kvm_s390_vcpu_crypto_setup()
2592 vcpu->arch.sie_block->ecb3 |= ECB3_AES; in kvm_s390_vcpu_crypto_setup()
2593 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
2594 vcpu->arch.sie_block->ecb3 |= ECB3_DEA; in kvm_s390_vcpu_crypto_setup()
2596 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
2601 free_page(vcpu->arch.sie_block->cbrlo); in kvm_s390_vcpu_unsetup_cmma()
2602 vcpu->arch.sie_block->cbrlo = 0; in kvm_s390_vcpu_unsetup_cmma()
2607 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL); in kvm_s390_vcpu_setup_cmma()
2608 if (!vcpu->arch.sie_block->cbrlo) in kvm_s390_vcpu_setup_cmma()
2615 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
2617 vcpu->arch.sie_block->ibc = model->ibc; in kvm_s390_vcpu_setup_model()
2619 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list; in kvm_s390_vcpu_setup_model()
2626 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | in kvm_arch_vcpu_setup()
2639 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT; in kvm_arch_vcpu_setup()
2641 vcpu->arch.sie_block->ecb |= ECB_SRSI; in kvm_arch_vcpu_setup()
2643 vcpu->arch.sie_block->ecb |= ECB_TE; in kvm_arch_vcpu_setup()
2645 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi) in kvm_arch_vcpu_setup()
2646 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI; in kvm_arch_vcpu_setup()
2648 vcpu->arch.sie_block->ecb2 |= ECB2_IEP; in kvm_arch_vcpu_setup()
2649 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI; in kvm_arch_vcpu_setup()
2651 vcpu->arch.sie_block->eca |= ECA_CEI; in kvm_arch_vcpu_setup()
2653 vcpu->arch.sie_block->eca |= ECA_IB; in kvm_arch_vcpu_setup()
2655 vcpu->arch.sie_block->eca |= ECA_SII; in kvm_arch_vcpu_setup()
2657 vcpu->arch.sie_block->eca |= ECA_SIGPI; in kvm_arch_vcpu_setup()
2659 vcpu->arch.sie_block->eca |= ECA_VX; in kvm_arch_vcpu_setup()
2660 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in kvm_arch_vcpu_setup()
2663 vcpu->arch.sie_block->ecd |= ECD_MEF; in kvm_arch_vcpu_setup()
2665 vcpu->arch.sie_block->ecd |= ECD_ETOKENF; in kvm_arch_vcpu_setup()
2666 if (vcpu->arch.sie_block->gd) { in kvm_arch_vcpu_setup()
2667 vcpu->arch.sie_block->eca |= ECA_AIV; in kvm_arch_vcpu_setup()
2669 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id); in kvm_arch_vcpu_setup()
2671 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx) in kvm_arch_vcpu_setup()
2673 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb; in kvm_arch_vcpu_setup()
2678 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; in kvm_arch_vcpu_setup()
2680 if (vcpu->kvm->arch.use_cmma) { in kvm_arch_vcpu_setup()
2685 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in kvm_arch_vcpu_setup()
2686 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; in kvm_arch_vcpu_setup()
2714 vcpu->arch.sie_block = &sie_page->sie_block; in kvm_arch_vcpu_create()
2715 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; in kvm_arch_vcpu_create()
2718 vcpu->arch.sie_block->mso = 0; in kvm_arch_vcpu_create()
2719 vcpu->arch.sie_block->msl = sclp.hamax; in kvm_arch_vcpu_create()
2721 vcpu->arch.sie_block->icpua = id; in kvm_arch_vcpu_create()
2722 spin_lock_init(&vcpu->arch.local_int.lock); in kvm_arch_vcpu_create()
2723 vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa; in kvm_arch_vcpu_create()
2724 if (vcpu->arch.sie_block->gd && sclp.has_gisaf) in kvm_arch_vcpu_create()
2725 vcpu->arch.sie_block->gd |= GISA_FORMAT1; in kvm_arch_vcpu_create()
2726 seqcount_init(&vcpu->arch.cputm_seqcount); in kvm_arch_vcpu_create()
2732 vcpu->arch.sie_block); in kvm_arch_vcpu_create()
2733 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
2737 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_create()
2751 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE); in kvm_arch_vcpu_in_kernel()
2756 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_block()
2762 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_unblock()
2767 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request()
2773 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request_handled()
2783 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) in exit_sie()
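
Lines 2756-2783 are the blocking protocol between the host and a vCPU running in SIE: request bits are ORed into prog20 (consulted before each SIE entry), and exit_sie() then busy-waits until the PROG_IN_SIE flag in prog0c drops. A simplified model with C11 atomics; the flag values are illustrative, not the kernel's, and the kick via CPUSTAT_STOP_INT is omitted:

    #include <stdatomic.h>

    #define PROG_BLOCK_SIE 0x1u  /* bit values illustrative */
    #define PROG_REQUEST   0x2u
    #define PROG_IN_SIE    0x1u

    struct sie_model {
        atomic_uint prog20;  /* host requests, checked on SIE entry */
        atomic_uint prog0c;  /* PROG_IN_SIE set while the guest runs */
    };

    static void vcpu_block(struct sie_model *s)
    {
        atomic_fetch_or(&s->prog20, PROG_BLOCK_SIE);
    }

    static void vcpu_unblock(struct sie_model *s)
    {
        atomic_fetch_and(&s->prog20, ~PROG_BLOCK_SIE);
    }

    static void exit_sie(struct sie_model *s)
    {
        while (atomic_load(&s->prog0c) & PROG_IN_SIE)
            ;  /* cpu_relax() in the kernel */
    }
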
2832 r = put_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_get_one_reg()
2836 r = put_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_get_one_reg()
2844 r = put_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_get_one_reg()
2848 r = put_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_get_one_reg()
2852 r = put_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_get_one_reg()
2856 r = put_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_get_one_reg()
2860 r = put_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_get_one_reg()
2864 r = put_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_get_one_reg()
2882 r = get_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_set_one_reg()
2886 r = get_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_set_one_reg()
2895 r = get_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_set_one_reg()
2899 r = get_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_set_one_reg()
2901 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_vcpu_ioctl_set_one_reg()
2905 r = get_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_set_one_reg()
2909 r = get_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_set_one_reg()
2913 r = get_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_set_one_reg()
2917 r = get_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_set_one_reg()
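
The put_user()/get_user() runs at lines 2832-2917 are the kernel half of the ONE_REG interface; each transfer moves one 64-bit value through the user address carried in struct kvm_one_reg. The matching userspace call, assuming the s390 uapi headers for the register id:

    #include <linux/kvm.h>
    #include <stdint.h>
    #include <sys/ioctl.h>

    /* Read one vCPU's clock comparator; returns the ioctl result. */
    static int get_clock_comp(int vcpu_fd, uint64_t *ckc)
    {
        struct kvm_one_reg reg = {
            .id   = KVM_REG_S390_CLOCK_COMP,  /* s390 uapi header */
            .addr = (uint64_t)(uintptr_t)ckc,
        };

        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }
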
2955 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_set_sregs()
2967 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_get_sregs()
3063 vcpu->arch.guestdbg.last_bp = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
3100 vcpu->kvm->arch.user_cpu_state_ctrl = 1; in kvm_arch_vcpu_ioctl_set_mpstate()
3140 rc = gmap_mprotect_notify(vcpu->arch.gmap, in kvm_s390_handle_requests()
3151 vcpu->arch.sie_block->ihcpu = 0xffff; in kvm_s390_handle_requests()
3172 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_s390_handle_requests()
3182 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA; in kvm_s390_handle_requests()
3191 if ((vcpu->kvm->arch.use_cmma) && in kvm_s390_handle_requests()
3193 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; in kvm_s390_handle_requests()
3215 kvm->arch.epoch = gtod->tod - htod.tod; in kvm_s390_set_tod_clock()
3216 kvm->arch.epdx = 0; in kvm_s390_set_tod_clock()
3218 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx; in kvm_s390_set_tod_clock()
3219 if (kvm->arch.epoch > gtod->tod) in kvm_s390_set_tod_clock()
3220 kvm->arch.epdx -= 1; in kvm_s390_set_tod_clock()
3225 vcpu->arch.sie_block->epoch = kvm->arch.epoch; in kvm_s390_set_tod_clock()
3226 vcpu->arch.sie_block->epdx = kvm->arch.epdx; in kvm_s390_set_tod_clock()
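
kvm_s390_set_tod_clock() (lines 3215-3226) is the inverse of the read at line 1072: the stored epoch is the 128-bit difference guest minus host, with a borrow from the index byte when the low 64-bit subtraction underflows, and the result is pushed into every blocked vCPU's SIE block. The borrow arithmetic, standalone:

    #include <stdint.h>

    static void set_epoch(uint64_t gtod, uint8_t gidx,
                          uint64_t htod, uint8_t hidx,
                          uint64_t *epoch, uint8_t *epdx)
    {
        *epoch = gtod - htod;
        *epdx  = gidx - hidx;
        if (*epoch > gtod)    /* low 64 bits underflowed: borrow */
            *epdx -= 1;
    }
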
3246 return gmap_fault(vcpu->arch.gmap, gpa, in kvm_arch_fault_in_page()
3270 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); in kvm_arch_async_page_not_present()
3271 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); in kvm_arch_async_page_not_present()
3277 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); in kvm_arch_async_page_present()
3278 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); in kvm_arch_async_page_present()
3299 struct kvm_arch_async_pf arch; in kvm_arch_setup_async_pf() local
3302 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_setup_async_pf()
3304 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != in kvm_arch_setup_async_pf()
3305 vcpu->arch.pfault_compare) in kvm_arch_setup_async_pf()
3311 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) in kvm_arch_setup_async_pf()
3313 if (!vcpu->arch.gmap->pfault_enabled) in kvm_arch_setup_async_pf()
3318 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) in kvm_arch_setup_async_pf()
3321 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); in kvm_arch_setup_async_pf()
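
kvm_arch_setup_async_pf() (lines 3299-3321) only arms a pfault for a host page fault when the guest opted in: a valid token is set, the current PSW matches the guest's select/compare masks, service-signal interrupts are open in CR0, and the gmap allows pfault (the kernel additionally checks the vCPU's stop state). The gate, condensed; the CR0 bit value is an assumption (bit 54, MSB-first):

    #include <stdint.h>

    #define PFAULT_TOKEN_INVALID 0xffffffffffffffffULL
    #define CR0_SERVICE_SIGNAL   (1ULL << 9)  /* CR0 bit 54, assumed */

    static int pfault_allowed(uint64_t psw_mask, uint64_t pf_select,
                              uint64_t pf_compare, uint64_t pf_token,
                              uint64_t cr0, int gmap_pfault_enabled)
    {
        if (pf_token == PFAULT_TOKEN_INVALID)
            return 0;
        if ((psw_mask & pf_select) != pf_compare)
            return 0;
        if (!(cr0 & CR0_SERVICE_SIGNAL))
            return 0;
        return gmap_pfault_enabled;
    }
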
3336 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14]; in vcpu_pre_run()
3337 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15]; in vcpu_pre_run()
3360 vcpu->arch.sie_block->icptcode = 0; in vcpu_pre_run()
3361 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); in vcpu_pre_run()
3387 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1); in vcpu_post_run_fault_in_sie()
3396 pgm_info = vcpu->arch.pgm; in vcpu_post_run_fault_in_sie()
3410 vcpu->arch.sie_block->icptcode); in vcpu_post_run()
3411 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); in vcpu_post_run()
3416 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14; in vcpu_post_run()
3417 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15; in vcpu_post_run()
3421 sie_page = container_of(vcpu->arch.sie_block, in vcpu_post_run()
3428 if (vcpu->arch.sie_block->icptcode > 0) { in vcpu_post_run()
3434 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; in vcpu_post_run()
3435 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; in vcpu_post_run()
3436 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; in vcpu_post_run()
3481 exit_reason = sie64a(vcpu->arch.sie_block, in __vcpu_run()
3503 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; in sync_regs()
3504 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; in sync_regs()
3508 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); in sync_regs()
3514 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; in sync_regs()
3515 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; in sync_regs()
3516 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; in sync_regs()
3517 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; in sync_regs()
3520 vcpu->arch.pfault_token = kvm_run->s.regs.pft; in sync_regs()
3521 vcpu->arch.pfault_select = kvm_run->s.regs.pfs; in sync_regs()
3522 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; in sync_regs()
3523 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in sync_regs()
3533 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) { in sync_regs()
3535 vcpu->arch.sie_block->ecb3 |= ECB3_RI; in sync_regs()
3544 !vcpu->arch.gs_enabled) { in sync_regs()
3546 vcpu->arch.sie_block->ecb |= ECB_GS; in sync_regs()
3547 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in sync_regs()
3548 vcpu->arch.gs_enabled = 1; in sync_regs()
3552 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in sync_regs()
3553 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0; in sync_regs()
3555 save_access_regs(vcpu->arch.host_acrs); in sync_regs()
3559 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc; in sync_regs()
3560 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs; in sync_regs()
3573 vcpu->arch.host_gscb = current->thread.gs_cb; in sync_regs()
3574 save_gs_cb(vcpu->arch.host_gscb); in sync_regs()
3576 if (vcpu->arch.gs_enabled) { in sync_regs()
3590 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; in store_regs()
3591 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; in store_regs()
3593 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); in store_regs()
3595 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; in store_regs()
3596 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; in store_regs()
3597 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; in store_regs()
3598 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; in store_regs()
3599 kvm_run->s.regs.pft = vcpu->arch.pfault_token; in store_regs()
3600 kvm_run->s.regs.pfs = vcpu->arch.pfault_select; in store_regs()
3601 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; in store_regs()
3602 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC; in store_regs()
3604 restore_access_regs(vcpu->arch.host_acrs); in store_regs()
3609 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; in store_regs()
3610 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; in store_regs()
3613 if (vcpu->arch.gs_enabled) in store_regs()
3616 current->thread.gs_cb = vcpu->arch.host_gscb; in store_regs()
3617 restore_gs_cb(vcpu->arch.host_gscb); in store_regs()
3619 if (!vcpu->arch.host_gscb) in store_regs()
3621 vcpu->arch.host_gscb = NULL; in store_regs()
3722 &vcpu->arch.sie_block->gpsw, 16); in kvm_s390_store_status_unloaded()
3728 &vcpu->arch.sie_block->todpr, 4); in kvm_s390_store_status_unloaded()
3732 clkcomp = vcpu->arch.sie_block->ckc >> 8; in kvm_s390_store_status_unloaded()
3738 &vcpu->arch.sie_block->gcr, 128); in kvm_s390_store_status_unloaded()
3789 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
3815 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
3829 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
3853 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
3867 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
3868 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
3936 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); in kvm_s390_guest_mem_op()
4024 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, in kvm_arch_vcpu_ioctl()
4041 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, in kvm_arch_vcpu_ioctl()
4047 r = gmap_fault(vcpu->arch.gmap, arg, 0); in kvm_arch_vcpu_ioctl()
4115 vmf->page = virt_to_page(vcpu->arch.sie_block); in kvm_arch_vcpu_fault()
4146 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit) in kvm_arch_prepare_memory_region()
4171 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, in kvm_arch_commit_memory_region()