// SPDX-License-Identifier: GPL-2.0
#include "kvm-s390.h"
/* in kvm_s390_pv_is_protected(): */
	lockdep_assert_held(&kvm->lock);

/* in kvm_s390_pv_cpu_is_protected(): */
	lockdep_assert_held(&vcpu->mutex);
/**
 * struct pv_vm_to_be_destroyed - Represents a protected VM that needs to
 * be destroyed
 */
/* in kvm_s390_clear_pv_state(): */
	kvm->arch.pv.handle = 0;
	kvm->arch.pv.guest_len = 0;
	kvm->arch.pv.stor_base = 0;
	kvm->arch.pv.stor_var = NULL;
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)

	cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), UVC_CMD_DESTROY_SEC_CPU, rc, rrc);

	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT DESTROY VCPU %d: rc %x rrc %x",
		     vcpu->vcpu_id, *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x", *rc, *rrc);

	/* Intended memory leak for something that should never happen. */
	if (!cc)
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));

	free_page((unsigned long)sida_addr(vcpu->arch.sie_block));
	vcpu->arch.sie_block->pv_handle_cpu = 0;
	vcpu->arch.sie_block->pv_handle_config = 0;
	memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
	vcpu->arch.sie_block->sdf = 0;

	vcpu->arch.sie_block->gbea = 1;
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)

	if (kvm_s390_pv_cpu_get_handle(vcpu))
		return -EINVAL;

	vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT,
						   get_order(uv_info.guest_cpu_stor_len));
	if (!vcpu->arch.pv.stor_base)
		return -ENOMEM;

	uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
	uvcb.num = vcpu->arch.sie_block->icpua;
	uvcb.state_origin = virt_to_phys(vcpu->arch.sie_block);
	uvcb.stor_origin = virt_to_phys((void *)vcpu->arch.pv.stor_base);

	if (!sida_addr) {
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));
		return -ENOMEM;
	}
	vcpu->arch.sie_block->sidad = virt_to_phys(sida_addr);

	*rc = uvcb.header.rc;
	KVM_UV_EVENT(vcpu->kvm, 3,
		     "PROTVIRT CREATE VCPU: cpu %d handle %llx rc %x rrc %x",
		     vcpu->vcpu_id, uvcb.cpu_handle, uvcb.header.rc,
		     uvcb.header.rrc);

	if (cc) {
		u16 dummy;

		kvm_s390_pv_destroy_cpu(vcpu, &dummy, &dummy);
		return -EIO;
	}

	vcpu->arch.pv.handle = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
	vcpu->arch.sie_block->sdf = 2;
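
/*
 * Hedged example, not part of pv.c: a hypothetical caller illustrating
 * the rc/rrc out-parameter convention used throughout this file. The
 * create/destroy helpers return a plain errno, while the Ultravisor
 * return code and return reason code are passed back through *rc/*rrc
 * so they can be forwarded to userspace. Locking done by the real
 * callers in kvm-s390.c is assumed and omitted here.
 */
static int example_create_protected_vcpu(struct kvm_vcpu *vcpu)
{
	u16 rc = 0, rrc = 0;	/* UVCB header rc/rrc, filled on return */
	int ret;

	ret = kvm_s390_pv_create_cpu(vcpu, &rc, &rrc);
	if (ret)
		pr_warn("PV create vcpu failed: ret %d rc %x rrc %x\n",
			ret, rc, rrc);
	return ret;
}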
/* in kvm_s390_pv_dealloc_vm(): */
	vfree(kvm->arch.pv.stor_var);
	free_pages(kvm->arch.pv.stor_base,
		   get_order(uv_info.guest_base_stor_len));
/* in kvm_s390_pv_alloc_vm(): */
	kvm->arch.pv.stor_var = NULL;
	kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
	if (!kvm->arch.pv.stor_base)
		return -ENOMEM;

	mutex_lock(&kvm->slots_lock);
	npages = kvm_s390_get_gfn_end(kvm_memslots(kvm));
	mutex_unlock(&kvm->slots_lock);

	kvm->arch.pv.guest_len = npages * PAGE_SIZE;

	kvm->arch.pv.stor_var = vzalloc(vlen);
	if (!kvm->arch.pv.stor_var)
		goto out_err;
	return 0;

out_err:
	kvm_s390_pv_dealloc_vm(kvm);
	return -ENOMEM;
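
/*
 * Hedged sketch, not part of pv.c: how the variable storage length
 * donated above could be derived from the UV query info. This mirrors
 * the (elided) vlen computation under the assumption that variable
 * storage scales per 1 MB block (HPAGE_SIZE on s390) of guest memory,
 * plus a fixed base amount.
 */
static unsigned long example_pv_var_stor_len(unsigned long guest_bytes)
{
	unsigned long vlen;

	/* per-megabyte rate reported by the Ultravisor */
	vlen = uv_info.guest_virt_var_stor_len * (guest_bytes / HPAGE_SIZE);
	/* plus the fixed base amount, also reported by the Ultravisor */
	return ALIGN(vlen, PAGE_SIZE) + uv_info.guest_virt_base_stor_len;
}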
/**
 * kvm_s390_pv_dispose_one_leftover - Clean up one leftover protected VM.
 * @rc: the RC code of the Destroy Secure Configuration UVC
 *
 * On success, kvm->mm->context.protected_count will be decremented atomically
 * and all other resources will be freed.
 */
static int kvm_s390_pv_dispose_one_leftover(struct kvm *kvm,
					    struct pv_vm_to_be_destroyed *leftover,
					    u16 *rc, u16 *rrc)

	/* It used the destroy-fast UVC, nothing left to do here */
	if (!leftover->handle)
		goto done_fast;
	cc = uv_cmd_nodata(leftover->handle, UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY LEFTOVER VM: rc %x rrc %x", *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy leftover vm failed rc %x rrc %x", *rc, *rrc);

	free_pages(leftover->stor_base, get_order(uv_info.guest_base_stor_len));
	free_pages(leftover->old_gmap_table, CRST_ALLOC_ORDER);
	vfree(leftover->stor_var);
done_fast:
	atomic_dec(&kvm->mm->context.protected_count);
/**
 * kvm_s390_destroy_lower_2g - Destroy the first 2GB of protected guest memory.
 */
static void kvm_s390_destroy_lower_2g(struct kvm *kvm)

	srcu_idx = srcu_read_lock(&kvm->srcu);

	while (slot && slot->base_gfn < pages_2g) {
		len = min_t(u64, slot->npages, pages_2g - slot->base_gfn) * PAGE_SIZE;
		s390_uv_destroy_range(kvm->mm, slot->userspace_addr, slot->userspace_addr + len);
		slot = gfn_to_memslot(kvm, slot->base_gfn + slot->npages);
	}

	srcu_read_unlock(&kvm->srcu, srcu_idx);
static int kvm_s390_pv_deinit_vm_fast(struct kvm *kvm, u16 *rc, u16 *rrc)

	if (rc)
		*rc = uvcb.header.rc;

	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM FAST: rc %x rrc %x",
		     uvcb.header.rc, uvcb.header.rrc);
	WARN_ONCE(cc && uvcb.header.rc != 0x104,
		  "protvirt destroy vm fast failed handle %llx rc %x rrc %x",
		  kvm_s390_pv_get_handle(kvm), uvcb.header.rc, uvcb.header.rrc);

	return cc ? -EIO : 0;
/**
 * kvm_s390_pv_set_aside - Set aside a protected VM for later teardown.
 * @rc: return value for the RC field of the UVCB
 *
 * Set aside the protected VM for a subsequent teardown. The VM will be able
 * to continue immediately as a non-secure VM, and the information needed to
 * properly tear down the protected VM is set aside.
 *
 * Context: kvm->lock needs to be held
 *
 * Return: 0 in case of success, -EINVAL if another protected VM was already set
 * aside, -ENOMEM if the system ran out of memory.
 */
int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc)

	lockdep_assert_held(&kvm->lock);

	if (kvm->arch.pv.set_aside)
		return -EINVAL;

	/* Guest with segment type ASCE, refuse to destroy asynchronously */
	if ((kvm->arch.gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
		return -EINVAL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (is_destroy_fast_available()) {
		res = kvm_s390_pv_deinit_vm_fast(kvm, rc, rrc);
	} else {
		priv->stor_var = kvm->arch.pv.stor_var;
		priv->stor_base = kvm->arch.pv.stor_base;
		priv->handle = kvm_s390_pv_get_handle(kvm);
		priv->old_gmap_table = (unsigned long)kvm->arch.gmap->table;
		WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
		if (s390_replace_asce(kvm->arch.gmap))
			res = -ENOMEM;
	}

	kvm->arch.pv.set_aside = priv;

	*rc = UVC_RC_EXECUTED;
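
/*
 * Hedged sketch, not part of pv.c: the two-step asynchronous teardown
 * flow these functions are designed for (driven in practice by the
 * KVM_PV_ASYNC_CLEANUP_PREPARE/PERFORM ioctl subcommands).
 * kvm_s390_pv_set_aside() runs under kvm->lock and returns quickly;
 * kvm_s390_pv_deinit_aside_vm() must run without the lock, typically
 * from a separate thread, while the VM continues as non-secure.
 */
static int example_async_teardown(struct kvm *kvm)
{
	u16 rc, rrc;
	int ret;

	mutex_lock(&kvm->lock);
	ret = kvm_s390_pv_set_aside(kvm, &rc, &rrc);
	mutex_unlock(&kvm->lock);
	if (ret)
		return ret;

	/* later, from a context that does not hold kvm->lock: */
	return kvm_s390_pv_deinit_aside_vm(kvm, &rc, &rrc);
}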
/**
 * kvm_s390_pv_deinit_vm - Deinitialize the current protected VM
 * @rc: the RC code of the UVC
 *
 * Context: kvm->lock needs to be held
 *
 * Return: 0 in case of success, otherwise -EIO
 */
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)

	cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
			   UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	if (!cc) {
		atomic_dec(&kvm->mm->context.protected_count);
		kvm_s390_pv_dealloc_vm(kvm);
	} else {
		/* Intended memory leak on "impossible" error */
		s390_replace_asce(kvm->arch.gmap);
	}
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc);

	return cc ? -EIO : 0;
/**
 * kvm_s390_pv_deinit_cleanup_all - Clean up all protected VMs associated
 * with a specific KVM.
 * @rc: the RC code of the first failing UVC
 *
 * Context: kvm->lock needs to be held unless being called from
 * kvm_arch_destroy_vm.
 *
 * Return: 0 if all VMs are successfully cleaned up, otherwise -EIO
 */
int kvm_s390_pv_deinit_cleanup_all(struct kvm *kvm, u16 *rc, u16 *rrc)

	if (!atomic_inc_not_zero(&kvm->mm->context.protected_count))
		return 0;

	*rc = 1;
	if (kvm_s390_pv_get_handle(kvm)) {
		cc = kvm_s390_pv_deinit_vm(kvm, rc, rrc);
		need_zap = true;
	}

	if (kvm->arch.pv.set_aside) {
		list_add(kvm->arch.pv.set_aside, &kvm->arch.pv.need_cleanup);
		kvm->arch.pv.set_aside = NULL;
	}

	while (!list_empty(&kvm->arch.pv.need_cleanup)) {
		cur = list_first_entry(&kvm->arch.pv.need_cleanup, typeof(*cur), list);
		if (kvm_s390_pv_dispose_one_leftover(kvm, cur, &_rc, &_rrc)) {
			cc = 1;
			/*
			 * Only return the first error rc and rrc, so make
			 * sure it is not overwritten. All destroys will
			 * additionally be reported via KVM_UV_EVENT().
			 */
			if (*rc == UVC_RC_EXECUTED) {
				*rc = _rc;
				*rrc = _rrc;
			}
		}
		list_del(&cur->list);
		kfree(cur);
	}

	if (need_zap && mmget_not_zero(kvm->mm)) {
		s390_uv_destroy_range(kvm->mm, 0, TASK_SIZE);
		mmput(kvm->mm);
	}

	atomic_dec(&kvm->mm->context.protected_count);
	return cc ? -EIO : 0;
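
/*
 * Hedged sketch, not part of pv.c: kvm_s390_pv_deinit_cleanup_all() is
 * the catch-all teardown; a destroy path could rely on it to handle the
 * live protected VM, a set-aside VM, and any queued leftovers in one
 * pass. The helper below is hypothetical.
 */
static void example_destroy_all_pv(struct kvm *kvm)
{
	u16 rc, rrc;

	if (kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc))
		pr_warn("PV cleanup failed: rc %x rrc %x\n", rc, rrc);
}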
/**
 * kvm_s390_pv_deinit_aside_vm - Teardown a previously set aside protected VM.
 * @rc: return value for the RC field of the UVCB
 *
 * Context: kvm->lock must not be held.
 *
 * Return: 0 in case of success, -EINVAL if no protected VM had been
 * prepared for asynchronous teardown, -EIO in case of other errors.
 */
int kvm_s390_pv_deinit_aside_vm(struct kvm *kvm, u16 *rc, u16 *rrc)

	lockdep_assert_not_held(&kvm->lock);
	mutex_lock(&kvm->lock);
	p = kvm->arch.pv.set_aside;
	kvm->arch.pv.set_aside = NULL;
	mutex_unlock(&kvm->lock);
	if (!p)
		return -EINVAL;

	/* When a fatal signal is received, stop immediately */
	if (s390_uv_destroy_range_interruptible(kvm->mm, 0, TASK_SIZE_MAX))
		goto done;
	if (kvm_s390_pv_dispose_one_leftover(kvm, p, rc, rrc))
		ret = -EIO;
	kfree(p);
	p = NULL;
done:
	/*
	 * p is not NULL if we aborted because of a fatal signal, in which
	 * case queue the leftover for later cleanup.
	 */
	if (p) {
		mutex_lock(&kvm->lock);
		list_add(&p->list, &kvm->arch.pv.need_cleanup);
		mutex_unlock(&kvm->lock);
		/* Did not finish, but pretend things went well */
		*rc = UVC_RC_EXECUTED;
	}
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)

	uvcb.guest_stor_len = kvm->arch.pv.guest_len;
	uvcb.guest_asce = kvm->arch.gmap->asce;
	uvcb.guest_sca = virt_to_phys(kvm->arch.sca);
	uvcb.conf_base_stor_origin =
		virt_to_phys((void *)kvm->arch.pv.stor_base);
	uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;
	uvcb.flags.ap_allow_instr = kvm->arch.model.uv_feat_guest.ap;
	uvcb.flags.ap_instr_intr = kvm->arch.model.uv_feat_guest.ap_intr;

	*rc = uvcb.header.rc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x flags %04x",
		     uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc, uvcb.flags.raw);

	/* Outputs */
	kvm->arch.pv.handle = uvcb.guest_handle;

	atomic_inc(&kvm->mm->context.protected_count);
	if (cc) {
		if (uvcb.header.rc & UVC_RC_NEED_DESTROY) {
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
		} else {
			atomic_dec(&kvm->mm->context.protected_count);
			kvm_s390_pv_dealloc_vm(kvm);
		}
		return -EIO;
	}
	kvm->arch.gmap->guest_handle = uvcb.guest_handle;
	/* Add the notifier only once. No races because we hold kvm->lock */
	if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
		kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
		mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
	}
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
			      u16 *rrc)

	*rc = uvcb.header.rc;

	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
		     *rc, *rrc);
	return cc ? -EINVAL : 0;
static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
		      u64 offset, u16 *rc, u16 *rrc)

	int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);

	*rc = uvcb.header.rc;

	if (ret && ret != -EAGAIN)
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
			     uvcb.gaddr, *rc, *rrc);
	return ret;
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
		       unsigned long tweak, u16 *rc, u16 *rrc)

	if (addr & ~PAGE_MASK || !size || size & ~PAGE_MASK)
		return -EINVAL;

	while (offset < size) {
		ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
		if (ret == -EAGAIN) {
			cond_resched();
			if (fatal_signal_pending(current))
				break;
			continue;
		}
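
/*
 * Hedged sketch, not part of pv.c: the order in which the primitives in
 * this file are normally driven when booting a protected guest through
 * the KVM_S390_PV_COMMAND ioctl: KVM_PV_ENABLE (kvm_s390_pv_init_vm),
 * KVM_PV_SET_SEC_PARMS (kvm_s390_pv_set_sec_parms), then one
 * KVM_PV_UNPACK per image component (kvm_s390_pv_unpack). Locking and
 * error unwinding are omitted; parameters are placeholders.
 */
static int example_pv_boot(struct kvm *kvm, void *hdr, u64 hdr_len,
			   unsigned long img_addr, unsigned long img_size,
			   unsigned long tweak)
{
	u16 rc, rrc;
	int ret;

	ret = kvm_s390_pv_init_vm(kvm, &rc, &rrc);
	if (ret)
		return ret;
	ret = kvm_s390_pv_set_sec_parms(kvm, hdr, hdr_len, &rc, &rrc);
	if (ret)
		return ret;
	/* import the encrypted image, page by page, retrying on -EAGAIN */
	return kvm_s390_pv_unpack(kvm, img_addr, img_size, tweak, &rc, &rrc);
}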
/* in kvm_s390_pv_set_cpu_state(): */
	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT SET CPU %d STATE %d rc %x rrc %x",
		     vcpu->vcpu_id, state, uvcb.header.rc, uvcb.header.rrc);
	if (cc)
		return -EINVAL;
int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc)

		.cpu_handle = vcpu->arch.pv.handle,

	*rc = uvcb.header.rc;
/**
 * kvm_s390_pv_dump_stor_state
 * @rc: Pointer to where the uvcb return code is stored
 *
 * buff_user might be written to even if an error rc is returned, for
 * instance if we encounter a fault after writing the first page of data.
 *
 * Context: kvm->lock needs to be held
 *
 * Return:
 *  0 on success
 *  -ENOMEM if allocating the cache fails
 *  -EINVAL if gaddr is not aligned to 1MB
 *  -EINVAL if buff_user_len is not aligned to uv_info.conf_dump_storage_state_len
 *  -EINVAL if the UV call fails, rc and rrc will be set in this case
 *  -EFAULT if copying the result to buff_user failed
 */
int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
				u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc)

		.config_handle = kvm->arch.pv.handle,

		ret = -EINVAL;

		ret = -ENOMEM;

		ret = -EINVAL;

		buff_user_len -= increment_len;

		ret = -EFAULT;

	KVM_UV_EVENT(kvm, 3,
		     "PROTVIRT DUMP STORAGE STATE: addr %llx ret %d, uvcb rc %x rrc %x",
		     uvcb.gaddr, ret, uvcb.header.rc, uvcb.header.rrc);
	*rc = uvcb.header.rc;
/**
 * kvm_s390_pv_dump_complete
 * @rc: Pointer to where the uvcb return code is stored
 *
 * Context: kvm->lock needs to be held
 *
 * Return:
 *  0 on success
 *  -ENOMEM if allocating the completion buffer fails
 *  -EINVAL if the UV call fails, rc and rrc will be set in this case
 *  -EFAULT if copying the result to buff_user failed
 */
int kvm_s390_pv_dump_complete(struct kvm *kvm, void __user *buff_user,
			      u16 *rc, u16 *rrc)

		return -ENOMEM;

	*rc = complete.header.rc;

	KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP COMPLETE: rc %x rrc %x",
		     complete.header.rc, complete.header.rrc);

		kvm->arch.pv.dumping = false;

			ret = -EFAULT;

	/* If the UVC returned an error, translate it to -EINVAL */
		ret = -EINVAL;
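
/*
 * Hedged sketch, not part of pv.c: how the two dump helpers above chain
 * together once dumping has been enabled (kvm->arch.pv.dumping set via
 * KVM_PV_DUMP_INIT). Storage state is read in one or more chunks, then
 * the completion data finalizes the dump. Buffer sizes and error
 * handling are simplified; all names below are placeholders.
 */
static int example_pv_dump(struct kvm *kvm, void __user *state_buf,
			   u64 state_len, void __user *compl_buf)
{
	u64 gaddr = 0;	/* updated to the last address dumped */
	u16 rc, rrc;
	int ret;

	ret = kvm_s390_pv_dump_stor_state(kvm, state_buf, &gaddr, state_len,
					  &rc, &rrc);
	if (ret)
		return ret;
	return kvm_s390_pv_dump_complete(kvm, compl_buf, &rc, &rrc);
}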