Lines Matching refs:vsie_page

24 struct vsie_page {  struct
63 static void prefix_unmapped(struct vsie_page *vsie_page) in prefix_unmapped() argument
65 atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20); in prefix_unmapped()
69 static void prefix_unmapped_sync(struct vsie_page *vsie_page) in prefix_unmapped_sync() argument
71 prefix_unmapped(vsie_page); in prefix_unmapped_sync()
72 if (vsie_page->scb_s.prog0c & PROG_IN_SIE) in prefix_unmapped_sync()
73 atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags); in prefix_unmapped_sync()
74 while (vsie_page->scb_s.prog0c & PROG_IN_SIE) in prefix_unmapped_sync()
79 static void prefix_mapped(struct vsie_page *vsie_page) in prefix_mapped() argument
81 atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20); in prefix_mapped()
85 static int prefix_is_mapped(struct vsie_page *vsie_page) in prefix_is_mapped() argument
87 return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST); in prefix_is_mapped()
91 static void update_intervention_requests(struct vsie_page *vsie_page) in update_intervention_requests() argument
96 cpuflags = atomic_read(&vsie_page->scb_o->cpuflags); in update_intervention_requests()
97 atomic_andnot(bits, &vsie_page->scb_s.cpuflags); in update_intervention_requests()
98 atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags); in update_intervention_requests()
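
The prefix_unmapped()/prefix_mapped()/prefix_is_mapped() hits (lines 63-87) and update_intervention_requests() (lines 91-98) all manipulate flag words in the shadow SIE block with atomic OR / AND-NOT. A minimal user-space sketch of that pattern, using C11 atomics in place of the kernel's atomic_t; the bit values and the sketch_* names are stand-ins for illustration, not the kernel definitions:

    #include <stdatomic.h>
    #include <stdio.h>

    #define PROG_REQUEST_BIT  0x1u   /* stand-in value, not the kernel constant */
    #define INTERVENTION_BITS 0x6u   /* stand-in for the intervention-request bits */

    struct shadow_scb {
        atomic_uint prog20;          /* "prefix pages need remapping" flag word */
        atomic_uint cpuflags;        /* shadowed CPU flags */
    };

    /* Mark the shadow prefix as unmapped: set the request bit. */
    static void sketch_prefix_unmapped(struct shadow_scb *s)
    {
        atomic_fetch_or(&s->prog20, PROG_REQUEST_BIT);
    }

    /* Mark it as mapped again: clear the request bit. */
    static void sketch_prefix_mapped(struct shadow_scb *s)
    {
        atomic_fetch_and(&s->prog20, ~PROG_REQUEST_BIT);
    }

    static int sketch_prefix_is_mapped(struct shadow_scb *s)
    {
        return !(atomic_load(&s->prog20) & PROG_REQUEST_BIT);
    }

    /* Copy the current intervention-request bits from the original flags word
     * into the shadow: clear them first, then OR in whatever is set right now. */
    static void sketch_update_interventions(struct shadow_scb *s, unsigned int orig_cpuflags)
    {
        atomic_fetch_and(&s->cpuflags, ~INTERVENTION_BITS);
        atomic_fetch_or(&s->cpuflags, orig_cpuflags & INTERVENTION_BITS);
    }

    int main(void)
    {
        struct shadow_scb s;

        atomic_init(&s.prog20, 0);
        atomic_init(&s.cpuflags, 0);

        sketch_prefix_unmapped(&s);
        printf("mapped: %d\n", sketch_prefix_is_mapped(&s));   /* 0 */
        sketch_prefix_mapped(&s);
        printf("mapped: %d\n", sketch_prefix_is_mapped(&s));   /* 1 */

        sketch_update_interventions(&s, 0x4u);
        printf("cpuflags: %#x\n", atomic_load(&s.cpuflags));   /* 0x4 */
        return 0;
    }
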
102 static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in prepare_cpuflags() argument
104 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in prepare_cpuflags()
105 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in prepare_cpuflags()
149 static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in shadow_crycb() argument
151 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in shadow_crycb()
152 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in shadow_crycb()
177 vsie_page->crycb.dea_wrapping_key_mask, 56)) in shadow_crycb()
181 scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT1 | in shadow_crycb()
185 b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask; in shadow_crycb()
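
Line 181 above packs the address of the per-vsie_page shadow crypto control block together with format bits into scb_s->crycbd. A standalone sketch of that "aligned address | format bits" encoding; the alignment, mask, and flag values here are assumptions chosen for illustration, not the architected CRYCB layout:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdalign.h>

    #define FMT_BITS_MASK 0x3u           /* stand-in: low bits carry the format */
    #define FMT1          0x1u           /* stand-in format value */

    /* A block whose alignment guarantees the low bits of its address are zero,
     * so they can carry format flags in a packed "designation" word. */
    struct crycb { alignas(256) unsigned char dea_wrapping_key_mask[24]; };

    static uint64_t make_designation(struct crycb *cb, unsigned int fmt)
    {
        uint64_t addr = (uint64_t)(uintptr_t)cb;
        return (addr & ~(uint64_t)FMT_BITS_MASK) | (fmt & FMT_BITS_MASK);
    }

    static struct crycb *designation_to_ptr(uint64_t d)
    {
        return (struct crycb *)(uintptr_t)(d & ~(uint64_t)FMT_BITS_MASK);
    }

    int main(void)
    {
        static struct crycb cb;
        uint64_t d = make_designation(&cb, FMT1);

        printf("format %llu, ptr ok: %d\n",
               (unsigned long long)(d & FMT_BITS_MASK),
               designation_to_ptr(d) == &cb);
        return 0;
    }
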
194 static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in prepare_ibc() argument
196 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in prepare_ibc()
197 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in prepare_ibc()
217 static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in unshadow_scb() argument
219 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in unshadow_scb()
220 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in unshadow_scb()
273 static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in shadow_scb() argument
275 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in shadow_scb()
276 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in shadow_scb()
295 rc = prepare_cpuflags(vcpu, vsie_page); in shadow_scb()
331 prefix_unmapped(vsie_page); in shadow_scb()
350 prefix_unmapped(vsie_page); in shadow_scb()
386 prepare_ibc(vcpu, vsie_page); in shadow_scb()
387 rc = shadow_crycb(vcpu, vsie_page); in shadow_scb()
390 unshadow_scb(vcpu, vsie_page); in shadow_scb()
398 struct vsie_page *cur; in kvm_s390_vsie_gmap_notifier()
440 static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in map_prefix() argument
442 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in map_prefix()
446 if (prefix_is_mapped(vsie_page)) in map_prefix()
450 prefix_mapped(vsie_page); in map_prefix()
455 rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix); in map_prefix()
457 rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, in map_prefix()
464 prefix_unmapped(vsie_page); in map_prefix()
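
The map_prefix() hits show a deliberate ordering: the prefix is flagged as mapped (line 450) before the shadow pages are actually faulted in, and flagged unmapped again (line 464) if faulting fails, so that an unmap racing with the fault-in is not lost. A compressed sketch of that "mark first, roll back on failure" ordering; fault_in(), the flag variable, and the two-page condition are simplified stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the real shadow-fault machinery. */
    static int fault_in(unsigned long gaddr) { (void)gaddr; return 0; }

    static bool prefix_mapped_flag;

    static int sketch_map_prefix(unsigned long prefix, bool two_pages)
    {
        int rc;

        if (prefix_mapped_flag)
            return 0;

        /*
         * Mark "mapped" before touching the pages: a concurrent unmap then
         * flips the flag back and the mapping is redone, instead of the
         * invalidation being lost while we are still faulting pages in.
         */
        prefix_mapped_flag = true;

        rc = fault_in(prefix);
        if (!rc && two_pages)                 /* some configurations need a second page */
            rc = fault_in(prefix + 4096);
        if (rc)
            prefix_mapped_flag = false;       /* roll back on failure */
        return rc;
    }

    int main(void)
    {
        int rc = sketch_map_prefix(0x10000, true);

        printf("rc=%d mapped=%d\n", rc, prefix_mapped_flag);
        return 0;
    }
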
497 static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in unpin_blocks() argument
499 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in unpin_blocks()
504 unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa); in unpin_blocks()
505 vsie_page->sca_gpa = 0; in unpin_blocks()
512 unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa); in unpin_blocks()
513 vsie_page->itdba_gpa = 0; in unpin_blocks()
519 unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa); in unpin_blocks()
520 vsie_page->gvrd_gpa = 0; in unpin_blocks()
526 unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa); in unpin_blocks()
527 vsie_page->riccbd_gpa = 0; in unpin_blocks()
533 unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa); in unpin_blocks()
534 vsie_page->sdnx_gpa = 0; in unpin_blocks()
553 static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in pin_blocks() argument
555 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in pin_blocks()
556 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in pin_blocks()
579 vsie_page->sca_gpa = gpa; in pin_blocks()
596 vsie_page->itdba_gpa = gpa; in pin_blocks()
615 vsie_page->gvrd_gpa = gpa; in pin_blocks()
632 vsie_page->riccbd_gpa = gpa; in pin_blocks()
661 vsie_page->sdnx_gpa = gpa; in pin_blocks()
666 unpin_blocks(vcpu, vsie_page); in pin_blocks()
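
pin_blocks()/unpin_blocks() (lines 497-666) follow a record-and-unwind pattern: each optional block is pinned, its guest address remembered in the vsie_page (sca_gpa, itdba_gpa, ...), and on any later failure everything pinned so far is released again (line 666). A small sketch of that pattern with just two blocks; the pin()/unpin() helpers and the fake host addresses are assumptions:

    #include <stdio.h>

    /* Hypothetical helpers standing in for pin_guest_page()/unpin_guest_page();
     * the returned "host address" is fake. */
    static int pin(unsigned long gpa, unsigned long *hpa) { *hpa = gpa ^ 0xf000; return 0; }
    static void unpin(unsigned long gpa, unsigned long hpa) { (void)gpa; (void)hpa; }

    struct blocks {
        unsigned long sca_gpa,   sca_hpa;
        unsigned long itdba_gpa, itdba_hpa;
    };

    static void sketch_unpin_blocks(struct blocks *b)
    {
        if (b->sca_hpa)   { unpin(b->sca_gpa,   b->sca_hpa);   b->sca_gpa = 0;   b->sca_hpa = 0; }
        if (b->itdba_hpa) { unpin(b->itdba_gpa, b->itdba_hpa); b->itdba_gpa = 0; b->itdba_hpa = 0; }
    }

    static int sketch_pin_blocks(struct blocks *b, unsigned long sca, unsigned long itdba)
    {
        int rc;

        if (sca) {
            rc = pin(sca, &b->sca_hpa);
            if (rc)
                goto unpin;
            b->sca_gpa = sca;          /* remember the guest address for unpinning */
        }
        if (itdba) {
            rc = pin(itdba, &b->itdba_hpa);
            if (rc)
                goto unpin;
            b->itdba_gpa = itdba;
        }
        return 0;
    unpin:
        sketch_unpin_blocks(b);        /* undo whatever was pinned so far */
        return rc;
    }

    int main(void)
    {
        struct blocks b = {0};

        printf("rc=%d\n", sketch_pin_blocks(&b, 0x2000, 0x3000));
        sketch_unpin_blocks(&b);
        return 0;
    }
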
671 static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, in unpin_scb() argument
674 hpa_t hpa = (hpa_t) vsie_page->scb_o; in unpin_scb()
678 vsie_page->scb_o = NULL; in unpin_scb()
687 static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, in pin_scb() argument
699 vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa; in pin_scb()
739 static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in handle_fault() argument
748 rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, in handle_fault()
755 vsie_page->fault_addr = current->thread.gmap_addr; in handle_fault()
767 struct vsie_page *vsie_page) in handle_last_fault() argument
769 if (vsie_page->fault_addr) in handle_last_fault()
770 kvm_s390_shadow_fault(vcpu, vsie_page->gmap, in handle_last_fault()
771 vsie_page->fault_addr); in handle_last_fault()
772 vsie_page->fault_addr = 0; in handle_last_fault()
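
handle_fault() remembers the guest address it just resolved in vsie_page->fault_addr (line 755), and handle_last_fault() resolves that address once more before the next entry, ignoring errors, then clears it (lines 769-772). A sketch of that "remember the last fault, re-resolve before re-entry" pattern; resolve() and the run_state type are stand-ins:

    #include <stdio.h>

    /* Hypothetical stand-in for resolving a fault in the shadow address space. */
    static int resolve(unsigned long gaddr) { (void)gaddr; return 0; }

    struct run_state { unsigned long fault_addr; };

    /* After an exit: resolve the fault and remember where it happened. */
    static int sketch_handle_fault(struct run_state *rs, unsigned long gaddr)
    {
        int rc = resolve(gaddr);

        rs->fault_addr = gaddr;
        return rc;
    }

    /* Before the next entry: re-resolve the remembered address, ignoring errors
     * (a persisting problem will simply fault again), then forget it. */
    static void sketch_handle_last_fault(struct run_state *rs)
    {
        if (rs->fault_addr)
            resolve(rs->fault_addr);
        rs->fault_addr = 0;
    }

    int main(void)
    {
        struct run_state rs = {0};

        sketch_handle_fault(&rs, 0x5000);
        sketch_handle_last_fault(&rs);
        printf("fault_addr=%#lx\n", rs.fault_addr);
        return 0;
    }
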
775 static inline void clear_vsie_icpt(struct vsie_page *vsie_page) in clear_vsie_icpt() argument
777 vsie_page->scb_s.icptcode = 0; in clear_vsie_icpt()
781 static void retry_vsie_icpt(struct vsie_page *vsie_page) in retry_vsie_icpt() argument
783 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in retry_vsie_icpt()
793 clear_vsie_icpt(vsie_page); in retry_vsie_icpt()
803 static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in handle_stfle() argument
805 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in handle_stfle()
806 __u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U; in handle_stfle()
809 retry_vsie_icpt(vsie_page); in handle_stfle()
810 if (read_guest_real(vcpu, fac, &vsie_page->fac, in handle_stfle()
811 sizeof(vsie_page->fac))) in handle_stfle()
813 scb_s->fac = (__u32)(__u64) &vsie_page->fac; in handle_stfle()
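
handle_stfle() reads the guest's facility list into the vsie_page's own buffer (lines 810-811) and points the shadow block's fac field at that private copy (line 813), with the low bits of the guest-supplied address masked off (line 806). A user-space sketch of that copy-then-redirect step; read_guest(), the buffer size, and the fake guest memory are assumptions:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Hypothetical guest-memory reader standing in for read_guest_real(). */
    static int read_guest(uint64_t gaddr, void *dest, size_t len)
    {
        static const unsigned char fake_guest_mem[256] = { [0x40] = 0xff };

        if (gaddr + len > sizeof(fake_guest_mem))
            return -1;
        memcpy(dest, fake_guest_mem + gaddr, len);
        return 0;
    }

    struct shadow_state {
        unsigned char fac[32];       /* private copy of the guest facility list */
        uint32_t fac_designation;    /* what the shadow block would point at */
    };

    static int sketch_handle_stfle(struct shadow_state *s, uint32_t guest_fac_addr)
    {
        /* Mask the low bits: the designation must be suitably aligned. */
        uint32_t fac = guest_fac_addr & 0x7ffffff8U;

        if (read_guest(fac, s->fac, sizeof(s->fac)))
            return -1;
        /* Point the shadow at our own, now-stable copy (truncated here; the
         * value is purely illustrative in this user-space sketch). */
        s->fac_designation = (uint32_t)(uintptr_t)s->fac;
        return 0;
    }

    int main(void)
    {
        struct shadow_state s = {0};

        printf("rc=%d first byte=%#x\n", sketch_handle_stfle(&s, 0x40), s.fac[0]);
        return 0;
    }
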
826 static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in do_vsie_run() argument
830 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in do_vsie_run()
831 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; in do_vsie_run()
835 handle_last_fault(vcpu, vsie_page); in do_vsie_run()
875 kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info); in do_vsie_run()
882 return handle_fault(vcpu, vsie_page); in do_vsie_run()
887 rc = handle_stfle(vcpu, vsie_page); in do_vsie_run()
892 clear_vsie_icpt(vsie_page); in do_vsie_run()
902 static void release_gmap_shadow(struct vsie_page *vsie_page) in release_gmap_shadow() argument
904 if (vsie_page->gmap) in release_gmap_shadow()
905 gmap_put(vsie_page->gmap); in release_gmap_shadow()
906 WRITE_ONCE(vsie_page->gmap, NULL); in release_gmap_shadow()
907 prefix_unmapped(vsie_page); in release_gmap_shadow()
911 struct vsie_page *vsie_page) in acquire_gmap_shadow() argument
928 if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat)) in acquire_gmap_shadow()
932 release_gmap_shadow(vsie_page); in acquire_gmap_shadow()
937 WRITE_ONCE(vsie_page->gmap, gmap); in acquire_gmap_shadow()
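
acquire_gmap_shadow()/release_gmap_shadow() cache the shadow address space in vsie_page->gmap: an existing shadow is reused if it still matches (line 928), otherwise the old one is dropped, the prefix is marked unmapped (line 907), and a new shadow is installed (line 937). A sketch of that reuse-or-replace pattern around a reference-counted handle; the handle type and validity check are stand-ins for the kernel's gmap API:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical reference-counted shadow-table handle. */
    struct shadow_gmap { unsigned long asce; int edat; int refcount; };

    static struct shadow_gmap *gmap_get(unsigned long asce, int edat)
    {
        struct shadow_gmap *g = calloc(1, sizeof(*g));

        if (!g)
            return NULL;
        g->asce = asce;
        g->edat = edat;
        g->refcount = 1;
        return g;
    }

    static void gmap_put(struct shadow_gmap *g) { if (--g->refcount == 0) free(g); }

    static bool gmap_valid(struct shadow_gmap *g, unsigned long asce, int edat)
    {
        return g->asce == asce && g->edat == edat;
    }

    struct cache { struct shadow_gmap *gmap; bool prefix_mapped; };

    static void sketch_release(struct cache *c)
    {
        if (c->gmap)
            gmap_put(c->gmap);
        c->gmap = NULL;
        c->prefix_mapped = false;    /* a new shadow needs a fresh prefix mapping */
    }

    static int sketch_acquire(struct cache *c, unsigned long asce, int edat)
    {
        if (c->gmap && gmap_valid(c->gmap, asce, edat))
            return 0;                /* reuse the cached shadow */
        sketch_release(c);
        c->gmap = gmap_get(asce, edat);
        return c->gmap ? 0 : -1;
    }

    int main(void)
    {
        struct cache c = {0};

        sketch_acquire(&c, 0x1000, 1);
        struct shadow_gmap *first = c.gmap;
        sketch_acquire(&c, 0x1000, 1);                /* same asce/edat: reused */
        printf("reused: %d\n", c.gmap == first);
        sketch_acquire(&c, 0x2000, 1);                /* changed asce: replaced */
        printf("new asce: %#lx\n", c.gmap->asce);
        sketch_release(&c);
        return 0;
    }
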
945 struct vsie_page *vsie_page) in register_shadow_scb() argument
947 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in register_shadow_scb()
949 WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s); in register_shadow_scb()
988 static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) in vsie_run() argument
990 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; in vsie_run()
994 rc = acquire_gmap_shadow(vcpu, vsie_page); in vsie_run()
996 rc = map_prefix(vcpu, vsie_page); in vsie_run()
998 gmap_enable(vsie_page->gmap); in vsie_run()
999 update_intervention_requests(vsie_page); in vsie_run()
1000 rc = do_vsie_run(vcpu, vsie_page); in vsie_run()
1038 static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr) in get_vsie_page()
1040 struct vsie_page *vsie_page; in get_vsie_page() local
1090 vsie_page = page_to_virt(page); in get_vsie_page()
1091 memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block)); in get_vsie_page()
1092 release_gmap_shadow(vsie_page); in get_vsie_page()
1093 vsie_page->fault_addr = 0; in get_vsie_page()
1094 vsie_page->scb_s.ihcpu = 0xffffU; in get_vsie_page()
1095 return vsie_page; in get_vsie_page()
1099 static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page) in put_vsie_page() argument
1101 struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT); in put_vsie_page()
1108 struct vsie_page *vsie_page; in kvm_s390_handle_vsie() local
1118 BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE); in kvm_s390_handle_vsie()
1128 vsie_page = get_vsie_page(vcpu->kvm, scb_addr); in kvm_s390_handle_vsie()
1129 if (IS_ERR(vsie_page)) in kvm_s390_handle_vsie()
1130 return PTR_ERR(vsie_page); in kvm_s390_handle_vsie()
1131 else if (!vsie_page) in kvm_s390_handle_vsie()
1135 rc = pin_scb(vcpu, vsie_page, scb_addr); in kvm_s390_handle_vsie()
1138 rc = shadow_scb(vcpu, vsie_page); in kvm_s390_handle_vsie()
1141 rc = pin_blocks(vcpu, vsie_page); in kvm_s390_handle_vsie()
1144 register_shadow_scb(vcpu, vsie_page); in kvm_s390_handle_vsie()
1145 rc = vsie_run(vcpu, vsie_page); in kvm_s390_handle_vsie()
1147 unpin_blocks(vcpu, vsie_page); in kvm_s390_handle_vsie()
1149 unshadow_scb(vcpu, vsie_page); in kvm_s390_handle_vsie()
1151 unpin_scb(vcpu, vsie_page, scb_addr); in kvm_s390_handle_vsie()
1153 put_vsie_page(vcpu->kvm, vsie_page); in kvm_s390_handle_vsie()
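
The kvm_s390_handle_vsie() hits (lines 1128-1153) give the overall ordering: get_vsie_page, pin_scb, shadow_scb, pin_blocks, register_shadow_scb, vsie_run, then the same steps undone in reverse. A compressed sketch of that setup/teardown ladder with goto-based unwinding; every step is a stub, and register_shadow_scb/put_vsie_page are omitted for brevity:

    #include <stdio.h>

    /* Hypothetical stubs; only the ordering and unwinding mirror the listing above. */
    static int  pin_scb_stub(void)      { return 0; }
    static void unpin_scb_stub(void)    { }
    static int  shadow_scb_stub(void)   { return 0; }
    static void unshadow_scb_stub(void) { }
    static int  pin_blocks_stub(void)   { return 0; }
    static void unpin_blocks_stub(void) { }
    static int  run_stub(void)          { return 0; }

    static int sketch_handle_vsie(void)
    {
        int rc;

        rc = pin_scb_stub();             /* map the guest's control block */
        if (rc)
            goto out;
        rc = shadow_scb_stub();          /* build the shadow control block */
        if (rc)
            goto out_unpin_scb;
        rc = pin_blocks_stub();          /* pin satellite blocks (SCA, ITDB, ...) */
        if (rc)
            goto out_unshadow;
        rc = run_stub();                 /* enter vSIE until something needs us */
        unpin_blocks_stub();
    out_unshadow:
        unshadow_scb_stub();             /* write results back to the guest's block */
    out_unpin_scb:
        unpin_scb_stub();
    out:
        return rc;
    }

    int main(void)
    {
        printf("rc=%d\n", sketch_handle_vsie());
        return 0;
    }
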
1168 struct vsie_page *vsie_page; in kvm_s390_vsie_destroy() local
1176 vsie_page = page_to_virt(page); in kvm_s390_vsie_destroy()
1177 release_gmap_shadow(vsie_page); in kvm_s390_vsie_destroy()