Lines Matching +full:gpa +full:- +full:1

1 // SPDX-License-Identifier: GPL-2.0
16 #include "kvm-s390.h"
23 unsigned long origin : 52; /* Region- or Segment-Table Origin */
25 unsigned long g : 1; /* Subspace Group Control */
26 unsigned long p : 1; /* Private Space Control */
27 unsigned long s : 1; /* Storage-Alteration-Event Control */
28 unsigned long x : 1; /* Space-Switch-Event Control */
29 unsigned long r : 1; /* Real-Space Control */
30 unsigned long : 1;
31 unsigned long dt : 2; /* Designation-Type Control */
32 unsigned long tl : 2; /* Region- or Segment-Table Length */
38 ASCE_TYPE_REGION3 = 1,
46 unsigned long rto: 52;/* Region-Table Origin */
48 unsigned long p : 1; /* DAT-Protection Bit */
49 unsigned long : 1;
50 unsigned long tf : 2; /* Region-Second-Table Offset */
51 unsigned long i : 1; /* Region-Invalid Bit */
52 unsigned long : 1;
53 unsigned long tt : 2; /* Table-Type Bits */
54 unsigned long tl : 2; /* Region-Second-Table Length */
61 unsigned long rto: 52;/* Region-Table Origin */
63 unsigned long p : 1; /* DAT-Protection Bit */
64 unsigned long : 1;
65 unsigned long tf : 2; /* Region-Third-Table Offset */
66 unsigned long i : 1; /* Region-Invalid Bit */
67 unsigned long : 1;
68 unsigned long tt : 2; /* Table-Type Bits */
69 unsigned long tl : 2; /* Region-Third-Table Length */
74 unsigned long sto: 52;/* Segment-Table Origin */
75 unsigned long : 1;
76 unsigned long fc : 1; /* Format-Control */
77 unsigned long p : 1; /* DAT-Protection Bit */
78 unsigned long : 1;
79 unsigned long tf : 2; /* Segment-Table Offset */
80 unsigned long i : 1; /* Region-Invalid Bit */
81 unsigned long cr : 1; /* Common-Region Bit */
82 unsigned long tt : 2; /* Table-Type Bits */
83 unsigned long tl : 2; /* Segment-Table Length */
87 unsigned long rfaa : 33; /* Region-Frame Absolute Address */
89 unsigned long av : 1; /* ACCF-Validity Control */
90 unsigned long acc: 4; /* Access-Control Bits */
91 unsigned long f : 1; /* Fetch-Protection Bit */
92 unsigned long fc : 1; /* Format-Control */
93 unsigned long p : 1; /* DAT-Protection Bit */
94 unsigned long iep: 1; /* Instruction-Execution-Protection */
96 unsigned long i : 1; /* Region-Invalid Bit */
97 unsigned long cr : 1; /* Common-Region Bit */
98 unsigned long tt : 2; /* Table-Type Bits */
108 unsigned long fc : 1; /* Format-Control */
110 unsigned long i : 1; /* Region-Invalid Bit */
111 unsigned long cr : 1; /* Common-Region Bit */
112 unsigned long tt : 2; /* Table-Type Bits */
118 unsigned long pto: 53;/* Page-Table Origin */
119 unsigned long fc : 1; /* Format-Control */
120 unsigned long p : 1; /* DAT-Protection Bit */
122 unsigned long i : 1; /* Segment-Invalid Bit */
123 unsigned long cs : 1; /* Common-Segment Bit */
124 unsigned long tt : 2; /* Table-Type Bits */
129 unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
131 unsigned long av : 1; /* ACCF-Validity Control */
132 unsigned long acc: 4; /* Access-Control Bits */
133 unsigned long f : 1; /* Fetch-Protection Bit */
134 unsigned long fc : 1; /* Format-Control */
135 unsigned long p : 1; /* DAT-Protection Bit */
136 unsigned long iep: 1; /* Instruction-Execution-Protection */
138 unsigned long i : 1; /* Segment-Invalid Bit */
139 unsigned long cs : 1; /* Common-Segment Bit */
140 unsigned long tt : 2; /* Table-Type Bits */
150 unsigned long fc : 1; /* Format-Control */
152 unsigned long i : 1; /* Segment-Invalid Bit */
153 unsigned long cs : 1; /* Common-Segment Bit */
154 unsigned long tt : 2; /* Table-Type Bits */
161 TABLE_TYPE_REGION3 = 1,
169 unsigned long pfra : 52; /* Page-Frame Real Address */
170 unsigned long z : 1; /* Zero Bit */
171 unsigned long i : 1; /* Page-Invalid Bit */
172 unsigned long p : 1; /* DAT-Protection Bit */
173 unsigned long iep: 1; /* Instruction-Execution-Protection */
211 unsigned long rfaa : 33; /* Region-Frame Absolute Address */
212 unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
213 unsigned long pfra : 52; /* Page-Frame Real Address */
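
The three frame-address widths gathered in this union determine how much memory a single table entry can map once its format control is set. A quick stand-alone arithmetic check (plain C, not part of the kernel source) of those sizes; the 2G and 1M cases are the enhanced-DAT large frames gated by the edat1/edat2 tests further down:

#include <stdio.h>

int main(void)
{
	/* the low-order bits not covered by the frame address give the frame size */
	printf("region frame  (rfaa, 33 bits): 2^%d = %lu bytes\n", 64 - 33, 1UL << (64 - 33)); /* 2G */
	printf("segment frame (sfaa, 44 bits): 2^%d = %lu bytes\n", 64 - 44, 1UL << (64 - 44)); /* 1M */
	printf("page frame    (pfra, 52 bits): 2^%d = %lu bytes\n", 64 - 52, 1UL << (64 - 52)); /* 4K */
	return 0;
}
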
220 u32 p : 1;
229 u32 : 1;
236 unsigned long i : 1; /* ALEN-Invalid Bit */
238 unsigned long fo : 1; /* Fetch-Only Bit */
239 unsigned long p : 1; /* Private Bit */
240 unsigned long alesn : 8; /* Access-List-Entry Sequence Number */
241 unsigned long aleax : 16; /* Access-List-Entry Authorization Index */
243 unsigned long : 1;
244 unsigned long asteo : 25; /* ASN-Second-Table-Entry Origin */
250 unsigned long i : 1; /* ASX-Invalid Bit */
251 unsigned long ato : 29; /* Authority-Table Origin */
252 unsigned long : 1;
253 unsigned long b : 1; /* Base-Space Bit */
255 unsigned long atl : 12; /* Authority-Table Length */
257 unsigned long ca : 1; /* Controlled-ASN Bit */
258 unsigned long ra : 1; /* Reusable-ASN Bit */
259 unsigned long asce : 64; /* Address-Space-Control Element */
270 read_lock(&kvm->arch.sca_lock); in ipte_lock_held()
271 rc = kvm_s390_get_ipte_control(kvm)->kh != 0; in ipte_lock_held()
272 read_unlock(&kvm->arch.sca_lock); in ipte_lock_held()
275 return kvm->arch.ipte_lock_count != 0; in ipte_lock_held()
282 mutex_lock(&kvm->arch.ipte_mutex); in ipte_lock_simple()
283 kvm->arch.ipte_lock_count++; in ipte_lock_simple()
284 if (kvm->arch.ipte_lock_count > 1) in ipte_lock_simple()
287 read_lock(&kvm->arch.sca_lock); in ipte_lock_simple()
292 read_unlock(&kvm->arch.sca_lock); in ipte_lock_simple()
297 new.k = 1; in ipte_lock_simple()
298 } while (cmpxchg(&ic->val, old.val, new.val) != old.val); in ipte_lock_simple()
299 read_unlock(&kvm->arch.sca_lock); in ipte_lock_simple()
301 mutex_unlock(&kvm->arch.ipte_mutex); in ipte_lock_simple()
308 mutex_lock(&kvm->arch.ipte_mutex); in ipte_unlock_simple()
309 kvm->arch.ipte_lock_count--; in ipte_unlock_simple()
310 if (kvm->arch.ipte_lock_count) in ipte_unlock_simple()
312 read_lock(&kvm->arch.sca_lock); in ipte_unlock_simple()
318 } while (cmpxchg(&ic->val, old.val, new.val) != old.val); in ipte_unlock_simple()
319 read_unlock(&kvm->arch.sca_lock); in ipte_unlock_simple()
320 wake_up(&kvm->arch.ipte_wq); in ipte_unlock_simple()
322 mutex_unlock(&kvm->arch.ipte_mutex); in ipte_unlock_simple()
330 read_lock(&kvm->arch.sca_lock); in ipte_lock_siif()
335 read_unlock(&kvm->arch.sca_lock); in ipte_lock_siif()
340 new.k = 1; in ipte_lock_siif()
342 } while (cmpxchg(&ic->val, old.val, new.val) != old.val); in ipte_lock_siif()
343 read_unlock(&kvm->arch.sca_lock); in ipte_lock_siif()
350 read_lock(&kvm->arch.sca_lock); in ipte_unlock_siif()
355 new.kh--; in ipte_unlock_siif()
358 } while (cmpxchg(&ic->val, old.val, new.val) != old.val); in ipte_unlock_siif()
359 read_unlock(&kvm->arch.sca_lock); in ipte_unlock_siif()
361 wake_up(&kvm->arch.ipte_wq); in ipte_unlock_siif()
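
Both lock flavours above follow the same publish-with-compare-and-swap pattern: snapshot the IPTE control word (under kvm->arch.sca_lock), wait while it is busy, then try to install the new value and restart if another CPU raced in; the siif variant additionally keeps a holder count (kh) so the last unlocker can clear the lock bit and wake the waiters on ipte_wq. A minimal stand-alone sketch of that retry loop, using C11 atomics and a placeholder bit position instead of the real union ipte_control:

#include <stdatomic.h>
#include <sched.h>

#define IC_K (1UL << 63)			/* placeholder position of the "lock held" bit */

static _Atomic unsigned long ipte_control;	/* stands in for the word in the SCA */

static void ipte_lock_sketch(void)
{
	unsigned long old, new;

	do {
		old = atomic_load(&ipte_control);
		while (old & IC_K) {		/* busy: another holder */
			sched_yield();		/* the kernel sleeps on ipte_wq instead */
			old = atomic_load(&ipte_control);
		}
		new = old | IC_K;
	} while (!atomic_compare_exchange_weak(&ipte_control, &old, new));
}

static void ipte_unlock_sketch(void)
{
	unsigned long old, new;

	do {
		old = atomic_load(&ipte_control);
		new = old & ~IC_K;
	} while (!atomic_compare_exchange_weak(&ipte_control, &old, new));
}

int main(void)
{
	ipte_lock_sketch();
	ipte_unlock_sketch();
	return 0;
}
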
392 return -EINVAL; in ar_translation()
394 save_access_regs(vcpu->run->s.regs.acrs); in ar_translation()
395 alet.val = vcpu->run->s.regs.acrs[ar]; in ar_translation()
398 asce->val = vcpu->arch.sie_block->gcr[1]; in ar_translation()
400 } else if (alet.val == 1) { in ar_translation()
401 asce->val = vcpu->arch.sie_block->gcr[7]; in ar_translation()
409 ald_addr = vcpu->arch.sie_block->gcr[5]; in ar_translation()
411 ald_addr = vcpu->arch.sie_block->gcr[2]; in ar_translation()
421 if (0x7fffffff - ald.alo * 128 < alet.alen * 16) in ar_translation()
429 if (ale.i == 1) in ar_translation()
443 if (ale.p == 1) { in ar_translation()
444 eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff; in ar_translation()
462 if (ale.fo == 1 && mode == GACC_STORE) in ar_translation()
465 asce->val = aste.asce; in ar_translation()
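
ar_translation() above resolves an access register to an ASCE in tiers: the two reserved ALET values map directly to the primary and secondary ASCEs, and anything else walks the access list (ALD, then ALE, then ASTE) with the validity, sequence-number, authorization and fetch-only checks visible in the fragments before finally taking aste.asce. A trivial stand-alone sketch of the first tier only; the strings merely name what the real code does:

#include <stdio.h>

static const char *alet_path(unsigned long alet)
{
	if (alet == 0)
		return "use the primary ASCE (gcr[1])";
	if (alet == 1)
		return "use the secondary ASCE (gcr[7])";
	return "walk the access list: ALD -> ALE -> ASTE -> aste.asce";
}

int main(void)
{
	printf("ALET 0: %s\n", alet_path(0));
	printf("ALET 5: %s\n", alet_path(5));
	return 0;
}
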
470 unsigned long addr : 52; /* Translation-exception Address */
473 unsigned long b56 : 1;
475 unsigned long b60 : 1;
476 unsigned long b61 : 1;
482 FSI_STORE = 1, /* Exception was due to store operation */
488 PROT_TYPE_KEYC = 1,
499 struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; in trans_exc_ending()
503 pgm->code = code; in trans_exc_ending()
504 tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code; in trans_exc_ending()
511 WARN_ON_ONCE(1); in trans_exc_ending()
514 tec->b61 = 1; in trans_exc_ending()
517 tec->b56 = 1; in trans_exc_ending()
520 tec->b60 = 1; in trans_exc_ending()
523 tec->b60 = 1; in trans_exc_ending()
526 tec->b61 = 1; in trans_exc_ending()
530 tec->b56 = 0; in trans_exc_ending()
531 tec->b60 = 0; in trans_exc_ending()
532 tec->b61 = 0; in trans_exc_ending()
542 * op_access_id only applies to MOVE_PAGE -> set bit 61 in trans_exc_ending()
546 tec->addr = gva >> PAGE_SHIFT; in trans_exc_ending()
547 tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH; in trans_exc_ending()
548 tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as; in trans_exc_ending()
557 * undefined for non-ar cases. It is undefined for in trans_exc_ending()
560 pgm->exc_access_id = ar; in trans_exc_ending()
576 struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw); in get_vcpu_asce()
579 asce->val = 0; in get_vcpu_asce()
580 asce->r = 1; in get_vcpu_asce()
589 asce->val = vcpu->arch.sie_block->gcr[1]; in get_vcpu_asce()
592 asce->val = vcpu->arch.sie_block->gcr[7]; in get_vcpu_asce()
595 asce->val = vcpu->arch.sie_block->gcr[13]; in get_vcpu_asce()
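
get_vcpu_asce() above selects the ASCE from whichever control register matches the PSW address-space control, builds a real-space ASCE (asce->r = 1) when translation is not wanted, and defers the access-register case to ar_translation(). A stand-alone sketch of that selection; the enum values follow the architected PSW encoding and only the control-register numbers are taken from the fragments above:

#include <stdio.h>

/* PSW address-space-control values as architected */
enum psw_as { AS_PRIMARY = 0, AS_ACCREG = 1, AS_SECONDARY = 2, AS_HOME = 3 };

/* which control register supplies the ASCE; -1 means the access-register
 * path (ar_translation() above) has to resolve it first */
static int asce_control_register(enum psw_as as)
{
	switch (as) {
	case AS_PRIMARY:	return 1;	/* gcr[1] */
	case AS_SECONDARY:	return 7;	/* gcr[7] */
	case AS_HOME:		return 13;	/* gcr[13] */
	case AS_ACCREG:		return -1;
	}
	return -1;
}

int main(void)
{
	printf("home-space ASCE comes from CR%d\n", asce_control_register(AS_HOME));
	return 0;
}
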
606 static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val) in deref_table() argument
608 return kvm_read_guest(kvm, gpa, val, sizeof(*val)); in deref_table()
612 * guest_translate - translate a guest virtual into a guest absolute address
615 * @gpa: points to where guest physical (absolute) address should be stored
623 * an addressing exception is indicated and @gpa will not be changed.
625 * Returns: - zero on success; @gpa contains the resulting absolute address
626 * - a negative value if guest access failed due to e.g. broken
628 * - a positive value if an access exception happened. In this case
633 unsigned long *gpa, const union asce asce, in guest_translate() argument
645 ctlreg0.val = vcpu->arch.sie_block->gcr[0]; in guest_translate()
646 edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8); in guest_translate()
647 edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78); in guest_translate()
648 iep = ctlreg0.iep && test_kvm_facility(vcpu->kvm, 130); in guest_translate()
684 if (kvm_is_error_gpa(vcpu->kvm, ptr)) in guest_translate()
686 if (deref_table(vcpu->kvm, ptr, &rfte.val)) in guest_translate()
687 return -EFAULT; in guest_translate()
702 if (kvm_is_error_gpa(vcpu->kvm, ptr)) in guest_translate()
704 if (deref_table(vcpu->kvm, ptr, &rste.val)) in guest_translate()
705 return -EFAULT; in guest_translate()
720 if (kvm_is_error_gpa(vcpu->kvm, ptr)) in guest_translate()
722 if (deref_table(vcpu->kvm, ptr, &rtte.val)) in guest_translate()
723 return -EFAULT; in guest_translate()
748 if (kvm_is_error_gpa(vcpu->kvm, ptr)) in guest_translate()
750 if (deref_table(vcpu->kvm, ptr, &ste.val)) in guest_translate()
751 return -EFAULT; in guest_translate()
768 if (kvm_is_error_gpa(vcpu->kvm, ptr)) in guest_translate()
770 if (deref_table(vcpu->kvm, ptr, &pte.val)) in guest_translate()
771 return -EFAULT; in guest_translate()
790 if (kvm_is_error_gpa(vcpu->kvm, raddr.addr)) in guest_translate()
792 *gpa = raddr.addr; in guest_translate()
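
The walk above indexes one table per level until it reaches the entry whose frame address supplies the absolute address stored through @gpa. As a stand-alone illustration (plain shifts instead of the kernel's vaddr bitfield union, which is not among the matched lines), this is how a 64-bit guest virtual address splits into the z/Architecture DAT indexes and how the 4K-page result is recombined; for the large-frame cases (rtte.fc with EDAT-2, ste.fc with EDAT-1) the walk stops early and correspondingly fewer high-order bits are replaced:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t gva = 0x0000000123456789ULL;	/* arbitrary example address */

	unsigned rfx = (gva >> 53) & 0x7ff;	/* region-first index,  bits 0-10  */
	unsigned rsx = (gva >> 42) & 0x7ff;	/* region-second index, bits 11-21 */
	unsigned rtx = (gva >> 31) & 0x7ff;	/* region-third index,  bits 22-32 */
	unsigned sx  = (gva >> 20) & 0x7ff;	/* segment index,       bits 33-43 */
	unsigned px  = (gva >> 12) & 0xff;	/* page index,          bits 44-51 */
	unsigned bx  = gva & 0xfff;		/* byte index,          bits 52-63 */

	uint64_t pfra  = 0x12345;		/* made-up page-frame real address */
	uint64_t raddr = (pfra << 12) | bx;	/* what *gpa would receive */

	printf("rfx=%u rsx=%u rtx=%u sx=%u px=%u bx=0x%x -> raddr=0x%llx\n",
	       rfx, rsx, rtx, sx, px, bx, (unsigned long long)raddr);
	return 0;
}
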
805 union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]}; in low_address_protection_enabled()
806 psw_t *psw = &vcpu->arch.sie_block->gpsw; in low_address_protection_enabled()
812 return 1; in low_address_protection_enabled()
816 enum gacc_mode mode, gpa_t gpa) in vm_check_access_key() argument
826 hva = gfn_to_hva(kvm, gpa_to_gfn(gpa)); in vm_check_access_key()
830 mmap_read_lock(current->mm); in vm_check_access_key()
831 r = get_guest_storage_key(current->mm, hva, &storage_key); in vm_check_access_key()
832 mmap_read_unlock(current->mm); in vm_check_access_key()
847 psw_t *psw = &vcpu->arch.sie_block->gpsw; in fetch_prot_override_applicable()
852 override = vcpu->arch.sie_block->gcr[0]; in fetch_prot_override_applicable()
869 return vcpu->arch.sie_block->gcr[0] & CR0_STORAGE_PROTECTION_OVERRIDE; in storage_prot_override_applicable()
874 /* matches special storage protection override key (9) -> allow */ in storage_prot_override_applies()
879 enum gacc_mode mode, union asce asce, gpa_t gpa, in vcpu_check_access_key() argument
886 /* access key 0 matches any storage key -> allow */ in vcpu_check_access_key()
893 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gpa)); in vcpu_check_access_key()
894 mmap_read_lock(current->mm); in vcpu_check_access_key()
895 r = get_guest_storage_key(current->mm, hva, &storage_key); in vcpu_check_access_key()
896 mmap_read_unlock(current->mm); in vcpu_check_access_key()
900 /* access key matches storage key -> allow */ in vcpu_check_access_key()
904 /* it is a fetch and fetch protection is off -> allow */ in vcpu_check_access_key()
918 * guest_range_to_gpas() - Calculate guest physical addresses of page fragments
925 * @asce: address-space-control element to use for translation
930 * such that the concatenation of page fragments starting at each gpa make up
941 * When deriving the boundaries of a fragment from a gpa, all but the last
945 * * 0 - success
946 * * <0 - translation could not be performed, for example if guest
948 * * >0 - an access exception occurred. In this case the returned value
957 psw_t *psw = &vcpu->arch.sie_block->gpsw; in guest_range_to_gpas()
962 unsigned long gpa; in guest_range_to_gpas() local
965 while (min(PAGE_SIZE - offset, len) > 0) { in guest_range_to_gpas()
966 fragment_len = min(PAGE_SIZE - offset, len); in guest_range_to_gpas()
972 rc = guest_translate(vcpu, ga, &gpa, asce, mode, &prot); in guest_range_to_gpas()
976 gpa = kvm_s390_real_to_abs(vcpu, ga); in guest_range_to_gpas()
977 if (kvm_is_error_gpa(vcpu->kvm, gpa)) { in guest_range_to_gpas()
984 rc = vcpu_check_access_key(vcpu, access_key, mode, asce, gpa, ga, in guest_range_to_gpas()
989 *gpas++ = gpa; in guest_range_to_gpas()
992 len -= fragment_len; in guest_range_to_gpas()
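
guest_range_to_gpas() above, like access_guest_abs_with_key() and access_guest_real() further down, never lets an access cross a page boundary: every iteration clamps its step to min(PAGE_SIZE - offset, len), handles that fragment, and only then advances. A stand-alone sketch of just the splitting logic (in the real code each fragment also goes through translation and the access-key check before its gpa is recorded):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long ga  = 0x12ff0;	/* arbitrary guest address */
	unsigned long len = 0x2100;	/* arbitrary length spanning several pages */

	while (len > 0) {
		unsigned long offset = ga & (PAGE_SIZE - 1);
		unsigned long fragment_len = PAGE_SIZE - offset;

		if (fragment_len > len)
			fragment_len = len;
		printf("fragment: ga=0x%lx len=0x%lx\n", ga, fragment_len);
		ga  += fragment_len;
		len -= fragment_len;
	}
	return 0;
}
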
997 static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa, in access_guest_page() argument
1000 const unsigned int offset = offset_in_page(gpa); in access_guest_page()
1001 const gfn_t gfn = gpa_to_gfn(gpa); in access_guest_page()
1012 access_guest_page_with_key(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa, in access_guest_page_with_key() argument
1021 gfn = gpa >> PAGE_SHIFT; in access_guest_page_with_key()
1032 return -EOPNOTSUPP; in access_guest_page_with_key()
1033 hva += offset_in_page(gpa); in access_guest_page_with_key()
1045 int access_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, void *data, in access_guest_abs_with_key() argument
1048 int offset = offset_in_page(gpa); in access_guest_abs_with_key()
1052 while (min(PAGE_SIZE - offset, len) > 0) { in access_guest_abs_with_key()
1053 fragment_len = min(PAGE_SIZE - offset, len); in access_guest_abs_with_key()
1054 rc = access_guest_page_with_key(kvm, mode, gpa, data, fragment_len, access_key); in access_guest_abs_with_key()
1058 len -= fragment_len; in access_guest_abs_with_key()
1060 gpa += fragment_len; in access_guest_abs_with_key()
1069 psw_t *psw = &vcpu->arch.sie_block->gpsw; in access_guest_with_key()
1087 nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1; in access_guest_with_key()
1092 return -ENOMEM; in access_guest_with_key()
1097 ipte_lock(vcpu->kvm); in access_guest_with_key()
1112 fragment_len = min(PAGE_SIZE - offset_in_page(gpas[idx]), len); in access_guest_with_key()
1114 rc = access_guest_page(vcpu->kvm, mode, gpas[idx], in access_guest_with_key()
1117 rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx], in access_guest_with_key()
1121 rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx], in access_guest_with_key()
1125 len -= fragment_len; in access_guest_with_key()
1140 ipte_unlock(vcpu->kvm); in access_guest_with_key()
1150 unsigned long gpa; in access_guest_real() local
1154 gpa = kvm_s390_real_to_abs(vcpu, gra); in access_guest_real()
1155 fragment_len = min(PAGE_SIZE - offset_in_page(gpa), len); in access_guest_real()
1156 rc = access_guest_page(vcpu->kvm, mode, gpa, data, fragment_len); in access_guest_real()
1157 len -= fragment_len; in access_guest_real()
1165 * guest_translate_address_with_key - translate guest logical into guest absolute address
1169 * @gpa: Guest physical address
1180 unsigned long *gpa, enum gacc_mode mode, in guest_translate_address_with_key() argument
1190 return guest_range_to_gpas(vcpu, gva, ar, gpa, 1, asce, mode, in guest_translate_address_with_key()
1195 * check_gva_range - test a range of guest virtual addresses for accessibility
1212 ipte_lock(vcpu->kvm); in check_gva_range()
1215 ipte_unlock(vcpu->kvm); in check_gva_range()
1221 * check_gpa_range - test a range of guest physical addresses for accessibility
1223 * @gpa: guest physical address
1228 int check_gpa_range(struct kvm *kvm, unsigned long gpa, unsigned long length, in check_gpa_range() argument
1235 fragment_len = min(PAGE_SIZE - offset_in_page(gpa), length); in check_gpa_range()
1236 rc = vm_check_access_key(kvm, access_key, mode, gpa); in check_gpa_range()
1237 length -= fragment_len; in check_gpa_range()
1238 gpa += fragment_len; in check_gpa_range()
1244 * kvm_s390_check_low_addr_prot_real - check for low-address protection
1248 * Checks whether an address is subject to low-address protection and set
1249 * up vcpu->arch.pgm accordingly if necessary.
1255 union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]}; in kvm_s390_check_low_addr_prot_real()
1263 * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
1284 parent = sg->parent; in kvm_s390_shadow_tables()
1286 asce.val = sg->orig_asce; in kvm_s390_shadow_tables()
1289 *fake = 1; in kvm_s390_shadow_tables()
1337 if (sg->edat_level >= 1) in kvm_s390_shadow_tables()
1364 if (sg->edat_level >= 1) in kvm_s390_shadow_tables()
1390 if (rtte.cr && asce.p && sg->edat_level >= 2) in kvm_s390_shadow_tables()
1392 if (rtte.fc && sg->edat_level >= 2) { in kvm_s390_shadow_tables()
1394 *fake = 1; in kvm_s390_shadow_tables()
1401 if (sg->edat_level >= 1) in kvm_s390_shadow_tables()
1430 if (ste.fc && sg->edat_level >= 1) { in kvm_s390_shadow_tables()
1431 *fake = 1; in kvm_s390_shadow_tables()
1450 * kvm_s390_shadow_fault - handle fault on a shadow page table
1457 * Returns: - 0 if the shadow fault was successfully resolved
1458 * - > 0 (pgm exception code) on exceptions while faulting
1459 * - -EAGAIN if the caller can retry immediately
1460 * - -EFAULT when accessing invalid guest addresses
1461 * - -ENOMEM if out of memory
1472 mmap_read_lock(sg->mm); in kvm_s390_shadow_fault()
1474 * We don't want any guest-2 tables to change - so the parent in kvm_s390_shadow_fault()
1475 * tables/pointers we read stay valid - unshadowing is however in kvm_s390_shadow_fault()
1476 * always possible - only guest_table_lock protects us. in kvm_s390_shadow_fault()
1478 ipte_lock(vcpu->kvm); in kvm_s390_shadow_fault()
1500 rc = gmap_read_table(sg->parent, pgt, &pte.val); in kvm_s390_shadow_fault()
1512 ipte_unlock(vcpu->kvm); in kvm_s390_shadow_fault()
1513 mmap_read_unlock(sg->mm); in kvm_s390_shadow_fault()
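
The return convention documented for kvm_s390_shadow_fault() (zero, a positive program-interruption code, or -EAGAIN/-EFAULT/-ENOMEM) is consumed by its caller in the vSIE code; the stand-alone sketch below only restates that convention with placeholder handlers and is not the real caller:

#include <errno.h>
#include <stdio.h>

/* placeholder actions, purely for illustration */
static int inject_program_exception(int code) { printf("pgm %d\n", code); return 1; }
static int retry_fault(void)                  { printf("retry\n");        return 0; }

static int handle_shadow_fault_result(int rc)
{
	if (rc > 0)			/* program exception code to inject */
		return inject_program_exception(rc);
	switch (rc) {
	case 0:				/* resolved, shadow tables now map the address */
		return 0;
	case -EAGAIN:			/* tables changed underneath, retry immediately */
		return retry_fault();
	case -EFAULT:			/* invalid guest address */
	case -ENOMEM:			/* out of memory */
	default:
		return rc;		/* propagate to the caller */
	}
}

int main(void)
{
	return handle_shadow_fault_result(0);
}
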