// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Test for s390x KVM_S390_MEM_OP
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include <linux/bits.h>

#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"

enum mop_target {
        LOGICAL,
        SIDA,
        ABSOLUTE,
        INVALID,
};

enum mop_access_mode {
        READ,
        WRITE,
};

struct mop_desc {
        uintptr_t gaddr;
        uintptr_t gaddr_v;
        uint64_t set_flags;
        unsigned int f_check : 1;
        unsigned int f_inject : 1;
        unsigned int f_key : 1;
        unsigned int _gaddr_v : 1;
        unsigned int _set_flags : 1;
        unsigned int _sida_offset : 1;
        unsigned int _ar : 1;
        uint32_t size;
        enum mop_target target;
        enum mop_access_mode mode;
        void *buf;
        uint32_t sida_offset;
        uint8_t ar;
        uint8_t key;
};

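/*
 * Build the kvm_s390_mem_op argument for the MEM_OP ioctl from a mop_desc.
 * The underscore-prefixed bitfields in mop_desc only record whether the
 * corresponding optional value (gaddr_v, set_flags, sida_offset, ar) was
 * supplied, so it is copied into the ioctl argument only when present.
 */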
static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc desc)
{
        struct kvm_s390_mem_op ksmo = {
                .gaddr = (uintptr_t)desc.gaddr,
                .size = desc.size,
                .buf = ((uintptr_t)desc.buf),
                .reserved = "ignored_ignored_ignored_ignored"
        };

        switch (desc.target) {
        case LOGICAL:
                if (desc.mode == READ)
                        ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
                if (desc.mode == WRITE)
                        ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
                break;
        case SIDA:
                if (desc.mode == READ)
                        ksmo.op = KVM_S390_MEMOP_SIDA_READ;
                if (desc.mode == WRITE)
                        ksmo.op = KVM_S390_MEMOP_SIDA_WRITE;
                break;
        case ABSOLUTE:
                if (desc.mode == READ)
                        ksmo.op = KVM_S390_MEMOP_ABSOLUTE_READ;
                if (desc.mode == WRITE)
                        ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE;
                break;
        case INVALID:
                ksmo.op = -1;
        }
        if (desc.f_check)
                ksmo.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
        if (desc.f_inject)
                ksmo.flags |= KVM_S390_MEMOP_F_INJECT_EXCEPTION;
        if (desc._set_flags)
                ksmo.flags = desc.set_flags;
        if (desc.f_key) {
                ksmo.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
                ksmo.key = desc.key;
        }
        if (desc._ar)
                ksmo.ar = desc.ar;
        else
                ksmo.ar = 0;
        if (desc._sida_offset)
                ksmo.sida_offset = desc.sida_offset;

        return ksmo;
}

struct test_info {
        struct kvm_vm *vm;
        struct kvm_vcpu *vcpu;
};

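/* Flip to true to log every MEM_OP the tests issue. */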
#define PRINT_MEMOP false
static void print_memop(struct kvm_vcpu *vcpu, const struct kvm_s390_mem_op *ksmo)
{
        if (!PRINT_MEMOP)
                return;

        if (!vcpu)
                printf("vm memop(");
        else
                printf("vcpu memop(");
        switch (ksmo->op) {
        case KVM_S390_MEMOP_LOGICAL_READ:
                printf("LOGICAL, READ, ");
                break;
        case KVM_S390_MEMOP_LOGICAL_WRITE:
                printf("LOGICAL, WRITE, ");
                break;
        case KVM_S390_MEMOP_SIDA_READ:
                printf("SIDA, READ, ");
                break;
        case KVM_S390_MEMOP_SIDA_WRITE:
                printf("SIDA, WRITE, ");
                break;
        case KVM_S390_MEMOP_ABSOLUTE_READ:
                printf("ABSOLUTE, READ, ");
                break;
        case KVM_S390_MEMOP_ABSOLUTE_WRITE:
                printf("ABSOLUTE, WRITE, ");
                break;
        }
        printf("gaddr=%llu, size=%u, buf=%llu, ar=%u, key=%u",
               ksmo->gaddr, ksmo->size, ksmo->buf, ksmo->ar, ksmo->key);
        if (ksmo->flags & KVM_S390_MEMOP_F_CHECK_ONLY)
                printf(", CHECK_ONLY");
        if (ksmo->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION)
                printf(", INJECT_EXCEPTION");
        if (ksmo->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION)
                printf(", SKEY_PROTECTION");
        puts(")");
}

static void memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo)
{
        struct kvm_vcpu *vcpu = info.vcpu;

        if (!vcpu)
                vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo);
        else
                vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo);
}

static int err_memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo)
{
        struct kvm_vcpu *vcpu = info.vcpu;

        if (!vcpu)
                return __vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo);
        else
                return __vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo);
}

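/*
 * Issue a MEM_OP described by the designated initializers in the variadic
 * arguments. For ABSOLUTE targets a virtual guest address given via GADDR_V
 * is first translated to a guest physical address; for other targets it is
 * used as is. The "err" prefix selects the ioctl wrapper that returns the
 * error instead of asserting success.
 */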
#define MEMOP(err, info_p, mop_target_p, access_mode_p, buf_p, size_p, ...) \
({ \
        struct test_info __info = (info_p); \
        struct mop_desc __desc = { \
                .target = (mop_target_p), \
                .mode = (access_mode_p), \
                .buf = (buf_p), \
                .size = (size_p), \
                __VA_ARGS__ \
        }; \
        struct kvm_s390_mem_op __ksmo; \
 \
        if (__desc._gaddr_v) { \
                if (__desc.target == ABSOLUTE) \
                        __desc.gaddr = addr_gva2gpa(__info.vm, __desc.gaddr_v); \
                else \
                        __desc.gaddr = __desc.gaddr_v; \
        } \
        __ksmo = ksmo_from_desc(__desc); \
        print_memop(__info.vcpu, &__ksmo); \
        err##memop_ioctl(__info, &__ksmo); \
})

#define MOP(...) MEMOP(, __VA_ARGS__)
#define ERR_MOP(...) MEMOP(err_, __VA_ARGS__)

#define GADDR(a) .gaddr = ((uintptr_t)a)
#define GADDR_V(v) ._gaddr_v = 1, .gaddr_v = ((uintptr_t)v)
#define CHECK_ONLY .f_check = 1
#define SET_FLAGS(f) ._set_flags = 1, .set_flags = (f)
#define SIDA_OFFSET(o) ._sida_offset = 1, .sida_offset = (o)
#define AR(a) ._ar = 1, .ar = (a)
#define KEY(a) .f_key = 1, .key = (a)
#define INJECT .f_inject = 1

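/* Do a memop as a dry run (CHECK_ONLY, no side effects) and then for real. */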
#define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); })

#define PAGE_SHIFT 12
#define PAGE_SIZE (1ULL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))

static uint8_t mem1[65536];
static uint8_t mem2[65536];

struct test_default {
        struct kvm_vm *kvm_vm;
        struct test_info vm;
        struct test_info vcpu;
        struct kvm_run *run;
        int size;
};

static struct test_default test_default_init(void *guest_code)
{
        struct kvm_vcpu *vcpu;
        struct test_default t;

        t.size = min((size_t)kvm_check_cap(KVM_CAP_S390_MEM_OP), sizeof(mem1));
        t.kvm_vm = vm_create_with_one_vcpu(&vcpu, guest_code);
        t.vm = (struct test_info) { t.kvm_vm, NULL };
        t.vcpu = (struct test_info) { t.kvm_vm, vcpu };
        t.run = vcpu->run;
        return t;
}

enum stage {
        /* Synced state set by host, e.g. DAT */
        STAGE_INITED,
        /* Guest did nothing */
        STAGE_IDLED,
        /* Guest set storage keys (specifics up to test case) */
        STAGE_SKEYS_SET,
        /* Guest copied memory (locations up to test case) */
        STAGE_COPIED,
};

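/* Run the vcpu until the next GUEST_SYNC and check that it reports the expected stage. */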
#define HOST_SYNC(info_p, stage) \
({ \
        struct test_info __info = (info_p); \
        struct kvm_vcpu *__vcpu = __info.vcpu; \
        struct ucall uc; \
        int __stage = (stage); \
 \
        vcpu_run(__vcpu); \
        get_ucall(__vcpu, &uc); \
        ASSERT_EQ(uc.cmd, UCALL_SYNC); \
        ASSERT_EQ(uc.args[1], __stage); \
})

static void prepare_mem12(void)
{
        int i;

        for (i = 0; i < sizeof(mem1); i++)
                mem1[i] = rand();
        memset(mem2, 0xaa, sizeof(mem2));
}

#define ASSERT_MEM_EQ(p1, p2, size) \
        TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!")

#define DEFAULT_WRITE_READ(copy_cpu, mop_cpu, mop_target_p, size, ...) \
({ \
        struct test_info __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu); \
        enum mop_target __target = (mop_target_p); \
        uint32_t __size = (size); \
 \
        prepare_mem12(); \
        CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size, \
                   GADDR_V(mem1), ##__VA_ARGS__); \
        HOST_SYNC(__copy_cpu, STAGE_COPIED); \
        CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size, \
                   GADDR_V(mem2), ##__VA_ARGS__); \
        ASSERT_MEM_EQ(mem1, mem2, __size); \
})

#define DEFAULT_READ(copy_cpu, mop_cpu, mop_target_p, size, ...) \
({ \
        struct test_info __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu); \
        enum mop_target __target = (mop_target_p); \
        uint32_t __size = (size); \
 \
        prepare_mem12(); \
        CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size, \
                   GADDR_V(mem1)); \
        HOST_SYNC(__copy_cpu, STAGE_COPIED); \
        CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size, ##__VA_ARGS__); \
        ASSERT_MEM_EQ(mem1, mem2, __size); \
})

static void guest_copy(void)
{
        GUEST_SYNC(STAGE_INITED);
        memcpy(&mem2, &mem1, sizeof(mem2));
        GUEST_SYNC(STAGE_COPIED);
}

static void test_copy(void)
{
        struct test_default t = test_default_init(guest_copy);

        HOST_SYNC(t.vcpu, STAGE_INITED);

        DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size);

        kvm_vm_free(t.kvm_vm);
}

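/*
 * Set the storage key of every page in [addr, addr + len) to the given key
 * byte (access-control bits 0-3, fetch-protection bit 4). LRA translates the
 * virtual address to the absolute address SSKE expects; an unmapped page
 * trips the guest assertion below.
 */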
static void set_storage_key_range(void *addr, size_t len, uint8_t key)
{
        uintptr_t _addr, abs, i;
        int not_mapped = 0;

        _addr = (uintptr_t)addr;
        for (i = _addr & PAGE_MASK; i < _addr + len; i += PAGE_SIZE) {
                abs = i;
                asm volatile (
                        "lra    %[abs], 0(0,%[abs])\n"
                        "       jz      0f\n"
                        "       llill   %[not_mapped],1\n"
                        "       j       1f\n"
                        "0:     sske    %[key], %[abs]\n"
                        "1:"
                        : [abs] "+&a" (abs), [not_mapped] "+r" (not_mapped)
                        : [key] "r" (key)
                        : "cc"
                );
                GUEST_ASSERT_EQ(not_mapped, 0);
        }
}

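/* Key 0x90: access-control key 9, fetch protection off. */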
static void guest_copy_key(void)
{
        set_storage_key_range(mem1, sizeof(mem1), 0x90);
        set_storage_key_range(mem2, sizeof(mem2), 0x90);
        GUEST_SYNC(STAGE_SKEYS_SET);

        for (;;) {
                memcpy(&mem2, &mem1, sizeof(mem2));
                GUEST_SYNC(STAGE_COPIED);
        }
}

static void test_copy_key(void)
{
        struct test_default t = test_default_init(guest_copy_key);

        HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

        /* vm, no key */
        DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size);

        /* vm/vcpu, matching key or key 0 */
        DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(0));
        DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(9));
        DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(0));
        DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(9));
        /*
         * There used to be different code paths for key handling depending on
         * whether the region crossed a page boundary.
         * There currently are not, but the more tests the merrier.
         */
        DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(0));
        DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(9));
        DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(0));
        DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(9));

        /* vm/vcpu, mismatching keys on read, but no fetch protection */
        DEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(2));
        DEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem1), KEY(2));

        kvm_vm_free(t.kvm_vm);
}

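/*
 * Key 0x98: access-control key 9 with the fetch-protection bit set, so both
 * fetches and stores are subject to key-controlled protection. Storage
 * protection override (CR0 bit 39) lifts that protection for key-9 pages.
 */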
static void guest_copy_key_fetch_prot(void)
{
        /*
         * For some reason combining the first sync with override enablement
         * results in an exception when calling HOST_SYNC.
         */
        GUEST_SYNC(STAGE_INITED);
        /* Storage protection override applies to both store and fetch. */
        set_storage_key_range(mem1, sizeof(mem1), 0x98);
        set_storage_key_range(mem2, sizeof(mem2), 0x98);
        GUEST_SYNC(STAGE_SKEYS_SET);

        for (;;) {
                memcpy(&mem2, &mem1, sizeof(mem2));
                GUEST_SYNC(STAGE_COPIED);
        }
}

static void test_copy_key_storage_prot_override(void)
{
        struct test_default t = test_default_init(guest_copy_key_fetch_prot);

        HOST_SYNC(t.vcpu, STAGE_INITED);
        t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
        t.run->kvm_dirty_regs = KVM_SYNC_CRS;
        HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

        /* vcpu, mismatching keys, storage protection override in effect */
        DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(2));

        kvm_vm_free(t.kvm_vm);
}

static void test_copy_key_fetch_prot(void)
{
        struct test_default t = test_default_init(guest_copy_key_fetch_prot);

        HOST_SYNC(t.vcpu, STAGE_INITED);
        HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

        /* vm/vcpu, matching key, fetch protection in effect */
        DEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(9));
        DEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem2), KEY(9));

        kvm_vm_free(t.kvm_vm);
}

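/*
 * On a key protection violation the MEM_OP ioctl returns a positive value,
 * the program interruption code; 4 is the protection exception code.
 */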
#define ERR_PROT_MOP(...) \
({ \
        int rv; \
 \
        rv = ERR_MOP(__VA_ARGS__); \
        TEST_ASSERT(rv == 4, "Should result in protection exception"); \
})

static void guest_error_key(void)
{
        GUEST_SYNC(STAGE_INITED);
        set_storage_key_range(mem1, PAGE_SIZE, 0x18);
        set_storage_key_range(mem1 + PAGE_SIZE, sizeof(mem1) - PAGE_SIZE, 0x98);
        GUEST_SYNC(STAGE_SKEYS_SET);
        GUEST_SYNC(STAGE_IDLED);
}

static void test_errors_key(void)
{
        struct test_default t = test_default_init(guest_error_key);

        HOST_SYNC(t.vcpu, STAGE_INITED);
        HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

        /* vm/vcpu, mismatching keys, fetch protection in effect */
        CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
        CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem2), KEY(2));
        CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
        CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2));

        kvm_vm_free(t.kvm_vm);
}

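/*
 * Lowcore offsets used below: 464 (0x1d0) is the program new PSW, 168 (0xa8)
 * is the translation-exception identification (TEID) stored on a protection
 * exception.
 */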
static void test_termination(void)
{
        struct test_default t = test_default_init(guest_error_key);
        uint64_t prefix;
        uint64_t teid;
        uint64_t teid_mask = BIT(63 - 56) | BIT(63 - 60) | BIT(63 - 61);
        uint64_t psw[2];

        HOST_SYNC(t.vcpu, STAGE_INITED);
        HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

        /* vcpu, mismatching keys after first page */
        ERR_PROT_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(1), INJECT);
        /*
         * The memop injected a program exception and the test needs to check the
         * Translation-Exception Identification (TEID). It is necessary to run
         * the guest in order to be able to read the TEID from guest memory.
         * Set the guest program new PSW, so the guest state is not clobbered.
         */
        prefix = t.run->s.regs.prefix;
        psw[0] = t.run->psw_mask;
        psw[1] = t.run->psw_addr;
        MOP(t.vm, ABSOLUTE, WRITE, psw, sizeof(psw), GADDR(prefix + 464));
        HOST_SYNC(t.vcpu, STAGE_IDLED);
        MOP(t.vm, ABSOLUTE, READ, &teid, sizeof(teid), GADDR(prefix + 168));
        /* Bits 56, 60, 61 form a code, 0 being the only one allowing for termination */
        ASSERT_EQ(teid & teid_mask, 0);

        kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_storage_prot_override(void)
{
        struct test_default t = test_default_init(guest_copy_key_fetch_prot);

        HOST_SYNC(t.vcpu, STAGE_INITED);
        t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
        t.run->kvm_dirty_regs = KVM_SYNC_CRS;
        HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

        /* vm, mismatching keys, storage protection override not applicable to vm */
        CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
        CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2));

        kvm_vm_free(t.kvm_vm);
}

const uint64_t last_page_addr = -PAGE_SIZE;

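/*
 * Fetch-protection override (CR0 bit 38) only covers effective addresses
 * 0-2047, which is why reads of up to 2048 bytes starting at address 0
 * succeed with a mismatching key while longer ones do not. Key 0x18 is
 * access-control key 1 with fetch protection enabled.
 */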
static void guest_copy_key_fetch_prot_override(void)
{
        int i;
        char *page_0 = 0;

        GUEST_SYNC(STAGE_INITED);
        set_storage_key_range(0, PAGE_SIZE, 0x18);
        set_storage_key_range((void *)last_page_addr, PAGE_SIZE, 0x0);
        asm volatile ("sske %[key],%[addr]\n" :: [addr] "r"(0), [key] "r"(0x18) : "cc");
        GUEST_SYNC(STAGE_SKEYS_SET);

        for (;;) {
                for (i = 0; i < PAGE_SIZE; i++)
                        page_0[i] = mem1[i];
                GUEST_SYNC(STAGE_COPIED);
        }
}

static void test_copy_key_fetch_prot_override(void)
{
        struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
        vm_vaddr_t guest_0_page, guest_last_page;

        guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
        guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
        if (guest_0_page != 0 || guest_last_page != last_page_addr) {
                print_skip("did not allocate guest pages at required positions");
                goto out;
        }

        HOST_SYNC(t.vcpu, STAGE_INITED);
        t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
        t.run->kvm_dirty_regs = KVM_SYNC_CRS;
        HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

        /* vcpu, mismatching keys on fetch, fetch protection override applies */
        prepare_mem12();
        MOP(t.vcpu, LOGICAL, WRITE, mem1, PAGE_SIZE, GADDR_V(mem1));
        HOST_SYNC(t.vcpu, STAGE_COPIED);
        CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));
        ASSERT_MEM_EQ(mem1, mem2, 2048);

        /*
         * vcpu, mismatching keys on fetch, fetch protection override applies,
         * wraparound
         */
        prepare_mem12();
        MOP(t.vcpu, LOGICAL, WRITE, mem1, 2 * PAGE_SIZE, GADDR_V(guest_last_page));
        HOST_SYNC(t.vcpu, STAGE_COPIED);
        CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048,
                   GADDR_V(guest_last_page), KEY(2));
        ASSERT_MEM_EQ(mem1, mem2, 2048);

out:
        kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_fetch_prot_override_not_enabled(void)
{
        struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
        vm_vaddr_t guest_0_page, guest_last_page;

        guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
        guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
        if (guest_0_page != 0 || guest_last_page != last_page_addr) {
                print_skip("did not allocate guest pages at required positions");
                goto out;
        }
        HOST_SYNC(t.vcpu, STAGE_INITED);
        HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

        /* vcpu, mismatching keys on fetch, fetch protection override not enabled */
        CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(0), KEY(2));

out:
        kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_fetch_prot_override_enabled(void)
{
        struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
        vm_vaddr_t guest_0_page, guest_last_page;

        guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
        guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
        if (guest_0_page != 0 || guest_last_page != last_page_addr) {
                print_skip("did not allocate guest pages at required positions");
                goto out;
        }
        HOST_SYNC(t.vcpu, STAGE_INITED);
        t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
        t.run->kvm_dirty_regs = KVM_SYNC_CRS;
        HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

        /*
         * vcpu, mismatching keys on fetch,
         * fetch protection override does not apply because the accesses
         * exceed the memory range it covers
         */
        CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048 + 1, GADDR_V(0), KEY(2));
        CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048 + 1,
                   GADDR_V(guest_last_page), KEY(2));
        /* vm, fetch protection override does not apply */
        CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR(0), KEY(2));
        CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));

out:
        kvm_vm_free(t.kvm_vm);
}

static void guest_idle(void)
{
        GUEST_SYNC(STAGE_INITED); /* for consistency's sake */
        for (;;)
                GUEST_SYNC(STAGE_IDLED);
}

static void _test_errors_common(struct test_info info, enum mop_target target, int size)
{
        int rv;

        /* Bad size: */
        rv = ERR_MOP(info, target, WRITE, mem1, -1, GADDR_V(mem1));
        TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");

        /* Zero size: */
        rv = ERR_MOP(info, target, WRITE, mem1, 0, GADDR_V(mem1));
        TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM),
                    "ioctl allows 0 as size");

        /* Bad flags: */
        rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR_V(mem1), SET_FLAGS(-1));
        TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");

        /* Bad guest address: */
        rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR((void *)~0xfffUL), CHECK_ONLY);
        TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory access");

        /* Bad host address: */
        rv = ERR_MOP(info, target, WRITE, 0, size, GADDR_V(mem1));
        TEST_ASSERT(rv == -1 && errno == EFAULT,
                    "ioctl does not report bad host memory address");

        /* Bad key: */
        rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR_V(mem1), KEY(17));
        TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows invalid key");
}

static void test_errors(void)
{
        struct test_default t = test_default_init(guest_idle);
        int rv;

        HOST_SYNC(t.vcpu, STAGE_INITED);

        _test_errors_common(t.vcpu, LOGICAL, t.size);
        _test_errors_common(t.vm, ABSOLUTE, t.size);

        /* Bad operation: */
        rv = ERR_MOP(t.vcpu, INVALID, WRITE, mem1, t.size, GADDR_V(mem1));
        TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
        /* virtual addresses are not translated when passing INVALID */
        rv = ERR_MOP(t.vm, INVALID, WRITE, mem1, PAGE_SIZE, GADDR(0));
        TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");

        /* Bad access register: */
        t.run->psw_mask &= ~(3UL << (63 - 17));
        t.run->psw_mask |= 1UL << (63 - 17); /* Enable AR mode */
        HOST_SYNC(t.vcpu, STAGE_IDLED); /* To sync new state to SIE block */
        rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), AR(17));
        TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows ARs > 15");
        t.run->psw_mask &= ~(3UL << (63 - 17)); /* Disable AR mode */
        HOST_SYNC(t.vcpu, STAGE_IDLED); /* Run to sync new state */

        /* Check that the SIDA calls are rejected for non-protected guests */
        rv = ERR_MOP(t.vcpu, SIDA, READ, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
        TEST_ASSERT(rv == -1 && errno == EINVAL,
                    "ioctl does not reject SIDA_READ in non-protected mode");
        rv = ERR_MOP(t.vcpu, SIDA, WRITE, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
        TEST_ASSERT(rv == -1 && errno == EINVAL,
                    "ioctl does not reject SIDA_WRITE in non-protected mode");

        kvm_vm_free(t.kvm_vm);
}

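/*
 * Each test declares the KVM_CAP_S390_MEM_OP_EXTENSION level it requires;
 * tests requiring a level above what the host reports are skipped.
 */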
struct testdef {
        const char *name;
        void (*test)(void);
        int extension;
} testlist[] = {
        {
                .name = "simple copy",
                .test = test_copy,
        },
        {
                .name = "generic error checks",
                .test = test_errors,
        },
        {
                .name = "copy with storage keys",
                .test = test_copy_key,
                .extension = 1,
        },
        {
                .name = "copy with key storage protection override",
                .test = test_copy_key_storage_prot_override,
                .extension = 1,
        },
        {
                .name = "copy with key fetch protection",
                .test = test_copy_key_fetch_prot,
                .extension = 1,
        },
        {
                .name = "copy with key fetch protection override",
                .test = test_copy_key_fetch_prot_override,
                .extension = 1,
        },
        {
                .name = "error checks with key",
                .test = test_errors_key,
                .extension = 1,
        },
        {
                .name = "termination",
                .test = test_termination,
                .extension = 1,
        },
        {
                .name = "error checks with key storage protection override",
                .test = test_errors_key_storage_prot_override,
                .extension = 1,
        },
        {
                .name = "error checks without key fetch prot override",
                .test = test_errors_key_fetch_prot_override_not_enabled,
                .extension = 1,
        },
        {
                .name = "error checks with key fetch prot override",
                .test = test_errors_key_fetch_prot_override_enabled,
                .extension = 1,
        },
};

int main(int argc, char *argv[])
{
        int extension_cap, idx;

        TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_MEM_OP));

        setbuf(stdout, NULL); /* Tell stdout not to buffer its content */

        ksft_print_header();

        ksft_set_plan(ARRAY_SIZE(testlist));

        extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);
        for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
                if (extension_cap >= testlist[idx].extension) {
                        testlist[idx].test();
                        ksft_test_result_pass("%s\n", testlist[idx].name);
                } else {
                        ksft_test_result_skip("%s - extension level %d not supported\n",
                                              testlist[idx].name,
                                              testlist[idx].extension);
                }
        }

        ksft_finished(); /* Print results and exit() accordingly */
}