// SPDX-License-Identifier: GPL-2.0
/*
 * AArch64 code
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#include <linux/compiler.h>
#include <assert.h>

#include "guest_modes.h"
#include "kvm_util.h"
#include "processor.h"

#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN	0xac0000

static vm_vaddr_t exception_handlers;

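/*
 * Round v up to a page boundary. Note that a value which is already
 * aligned is still pushed up to the next page, so callers always get
 * at least one byte of slack.
 */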
static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
	return (v + vm->page_size) & ~(vm->page_size - 1);
}

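/*
 * Each translation level resolves (page_shift - 3) VA bits, i.e. one
 * page full of 8-byte descriptors. The index helpers below derive each
 * level's shift from the page size; the top level (PGD) covers whatever
 * VA bits remain below va_bits.
 */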
static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;

	return (gva >> shift) & mask;
}

static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels == 4,
		    "Mode %d does not have 4 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels >= 3,
		    "Mode %d does not have >= 3 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
	return (gva >> vm->page_shift) & mask;
}

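/*
 * Extract the output address from a table or page descriptor, masking
 * off the low attribute bits and anything above the VA range.
 */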
static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
{
	uint64_t mask = ((1UL << (vm->va_bits - vm->page_shift)) - 1) << vm->page_shift;
	return entry & mask;
}

static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	return 1 << (vm->va_bits - shift);
}

static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
{
	return 1 << (vm->page_shift - 3);
}

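/*
 * Allocate backing pages for the top-level translation table (PGD),
 * sized from ptrs_per_pgd() since the top level may resolve a different
 * number of VA bits than the lower levels.
 */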
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	if (!vm->pgd_created) {
		vm_paddr_t paddr = vm_phy_pages_alloc(vm,
			page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size,
			KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
		vm->pgd = paddr;
		vm->pgd_created = true;
	}
}

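/*
 * Map a single page, allocating intermediate tables on demand. Table
 * and page descriptors both set bits [1:0] to 0b11 (valid + table/page);
 * the leaf entry additionally carries the MAIR attribute index in
 * bits [4:2] and the Access Flag (bit 10) so the first access does not
 * fault.
 */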
static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
			 uint64_t flags)
{
	uint8_t attr_idx = flags & 7;
	uint64_t *ptep;

	TEST_ASSERT((vaddr % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(vaddr >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		paddr, vm->max_gfn, vm->page_size);

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
	if (!*ptep)
		*ptep = vm_alloc_page_table(vm) | 3;

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
		if (!*ptep)
			*ptep = vm_alloc_page_table(vm) | 3;
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
		if (!*ptep)
			*ptep = vm_alloc_page_table(vm) | 3;
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	*ptep = paddr | 3;
	*ptep |= (attr_idx << 2) | (1 << 10) /* Access Flag */;
}

void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	uint64_t attr_idx = 4; /* NORMAL (See DEFAULT_MAIR_EL1) */

	_virt_pg_map(vm, vaddr, paddr, attr_idx);
}

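/*
 * Translate a guest virtual address to a guest physical address by
 * walking the guest's page tables from the host side. Fails the test
 * if the address is not mapped.
 */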
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep;

	if (!vm->pgd_created)
		goto unmapped_gva;

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
	if (!ptep)
		goto unmapped_gva;

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
		if (!ptep)
			goto unmapped_gva;
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
		if (!ptep)
			goto unmapped_gva;
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
		if (!ptep)
			goto unmapped_gva;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
	exit(1);
}

static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
{
#ifdef DEBUG
	static const char * const type[] = { "", "pud", "pmd", "pte" };
	uint64_t pte, *ptep;

	if (level == 4)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (!*ptep)
			continue;
		fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
	}
#endif
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int level = 4 - (vm->pgtable_levels - 1);
	uint64_t pgd, *ptep;

	if (!vm->pgd_created)
		return;

	for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
		ptep = addr_gpa2hva(vm, pgd);
		if (!*ptep)
			continue;
		fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
	}
}

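/*
 * Initialize a vCPU with KVM_ARM_VCPU_INIT (using the host's preferred
 * target when none is given), then program the EL1 system registers
 * needed to run with the MMU on: granule and output size in TCR_EL1,
 * cacheability/shareability attributes, MAIR_EL1, and the page table
 * root in TTBR0_EL1. TPIDR_EL1 is seeded with the vCPU id for
 * guest_get_vcpuid().
 */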
void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
{
	struct kvm_vcpu_init default_init = { .target = -1, };
	struct kvm_vm *vm = vcpu->vm;
	uint64_t sctlr_el1, tcr_el1;

	if (!init)
		init = &default_init;

	if (init->target == -1) {
		struct kvm_vcpu_init preferred;
		vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred);
		init->target = preferred.target;
	}

	vcpu_ioctl(vcpu, KVM_ARM_VCPU_INIT, init);

	/*
	 * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
	 * registers, which the variable argument list macros do.
	 */
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);

	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);

	/* Configure base granule size */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		TEST_FAIL("AArch64 does not support 4K sized pages "
			  "with 52-bit physical address ranges");
	case VM_MODE_PXXV48_4K:
		TEST_FAIL("AArch64 does not support 4K sized pages "
			  "with ANY-bit physical address ranges");
	case VM_MODE_P52V48_64K:
	case VM_MODE_P48V48_64K:
	case VM_MODE_P40V48_64K:
	case VM_MODE_P36V48_64K:
		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
		break;
	case VM_MODE_P48V48_16K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P36V48_16K:
	case VM_MODE_P36V47_16K:
		tcr_el1 |= 2ul << 14; /* TG0 = 16KB */
		break;
	case VM_MODE_P48V48_4K:
	case VM_MODE_P40V48_4K:
	case VM_MODE_P36V48_4K:
		tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	/* Configure output size */
	switch (vm->mode) {
	case VM_MODE_P52V48_64K:
		tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
		break;
	case VM_MODE_P48V48_4K:
	case VM_MODE_P48V48_16K:
	case VM_MODE_P48V48_64K:
		tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
		break;
	case VM_MODE_P40V48_4K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P40V48_64K:
		tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
		break;
	case VM_MODE_P36V48_4K:
	case VM_MODE_P36V48_16K:
	case VM_MODE_P36V48_64K:
	case VM_MODE_P36V47_16K:
		tcr_el1 |= 1ul << 32; /* IPS = 36 bits */
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */;
	/* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */
	tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
	tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;

	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), vm->pgd);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id);
}

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
	uint64_t pstate, pc;

	vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate), &pstate);
	vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);

	fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
		indent, "", pstate, pc);
}

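/*
 * Create a vCPU, set it up, and give it a guest stack: DEFAULT_STACK_PGS
 * pages for 4K-page VMs, a single (large) page otherwise. The stack
 * pointer starts at the top of the allocation.
 */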
struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  struct kvm_vcpu_init *init, void *guest_code)
{
	size_t stack_size = vm->page_size == 4096 ?
					DEFAULT_STACK_PGS * vm->page_size :
					vm->page_size;
	uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size,
					      DEFAULT_ARM64_GUEST_STACK_VADDR_MIN);
	struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);

	aarch64_vcpu_setup(vcpu, init);

	vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);

	return vcpu;
}

struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  void *guest_code)
{
	return aarch64_vcpu_add(vm, vcpu_id, NULL, guest_code);
}

void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
	va_list ap;
	int i;

	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
		    "  num: %u\n", num);

	va_start(ap, num);

	for (i = 0; i < num; i++) {
		vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[i]),
			     va_arg(ap, uint64_t));
	}

	va_end(ap);
}

void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec)
{
	ucall(UCALL_UNHANDLED, 3, vector, ec, valid_ec);
	while (1)
		;
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (get_ucall(vcpu, &uc) != UCALL_UNHANDLED)
		return;

	if (uc.args[2]) /* valid_ec */ {
		assert(VECTOR_IS_SYNC(uc.args[0]));
		TEST_FAIL("Unexpected exception (vector:0x%lx, ec:0x%lx)",
			  uc.args[0], uc.args[1]);
	} else {
		assert(!VECTOR_IS_SYNC(uc.args[0]));
		TEST_FAIL("Unexpected exception (vector:0x%lx)",
			  uc.args[0]);
	}
}

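/*
 * Per-VM table of guest exception handlers, indexed by vector and, for
 * synchronous exceptions, by the ESR exception class (EC).
 */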
struct handlers {
	handler_fn exception_handlers[VECTOR_NUM][ESR_EC_NUM];
};

void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
{
	extern char vectors;

	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors);
}

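/*
 * Common C entry point for the assembly exception vectors. Synchronous
 * exceptions are demultiplexed by the EC field of ESR_EL1; everything
 * else dispatches on the vector alone, using EC slot 0.
 */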
void route_exception(struct ex_regs *regs, int vector)
{
	struct handlers *handlers = (struct handlers *)exception_handlers;
	bool valid_ec;
	int ec = 0;

	switch (vector) {
	case VECTOR_SYNC_CURRENT:
	case VECTOR_SYNC_LOWER_64:
		ec = (read_sysreg(esr_el1) >> ESR_EC_SHIFT) & ESR_EC_MASK;
		valid_ec = true;
		break;
	case VECTOR_IRQ_CURRENT:
	case VECTOR_IRQ_LOWER_64:
	case VECTOR_FIQ_CURRENT:
	case VECTOR_FIQ_LOWER_64:
	case VECTOR_ERROR_CURRENT:
	case VECTOR_ERROR_LOWER_64:
		ec = 0;
		valid_ec = false;
		break;
	default:
		valid_ec = false;
		goto unexpected_exception;
	}

	if (handlers && handlers->exception_handlers[vector][ec])
		return handlers->exception_handlers[vector][ec](regs);

unexpected_exception:
	kvm_exit_unexpected_exception(vector, ec, valid_ec);
}

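/*
 * Allocate the handler table in guest memory and patch its guest
 * address into the guest's copy of exception_handlers so that
 * route_exception() can find it.
 */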
void vm_init_descriptor_tables(struct kvm_vm *vm)
{
	vm->handlers = vm_vaddr_alloc(vm, sizeof(struct handlers),
			vm->page_size);

	*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}

void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec,
			     void (*handler)(struct ex_regs *))
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(VECTOR_IS_SYNC(vector));
	assert(vector < VECTOR_NUM);
	assert(ec < ESR_EC_NUM);
	handlers->exception_handlers[vector][ec] = handler;
}

void vm_install_exception_handler(struct kvm_vm *vm, int vector,
				  void (*handler)(struct ex_regs *))
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(!VECTOR_IS_SYNC(vector));
	assert(vector < VECTOR_NUM);
	handlers->exception_handlers[vector][0] = handler;
}

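/* Runs in the guest; TPIDR_EL1 holds the vCPU id set by aarch64_vcpu_setup(). */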
uint32_t guest_get_vcpuid(void)
{
	return read_sysreg(tpidr_el1);
}

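/*
 * Probe the host's supported stage-1 granule sizes by creating a scratch
 * VM/vCPU and reading ID_AA64MMFR0_EL1: TGran4 (bits [31:28]) is
 * supported unless 0xf, TGran64 (bits [27:24]) when 0, and TGran16
 * (bits [23:20]) when non-zero.
 */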
void aarch64_get_supported_page_sizes(uint32_t ipa,
				      bool *ps4k, bool *ps16k, bool *ps64k)
{
	struct kvm_vcpu_init preferred_init;
	int kvm_fd, vm_fd, vcpu_fd, err;
	uint64_t val;
	struct kvm_one_reg reg = {
		.id	= KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1),
		.addr	= (uint64_t)&val,
	};

	kvm_fd = open_kvm_dev_path_or_exit();
	vm_fd = __kvm_ioctl(kvm_fd, KVM_CREATE_VM, (void *)(unsigned long)ipa);
	TEST_ASSERT(vm_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm_fd));

	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
	TEST_ASSERT(vcpu_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu_fd));

	err = ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &preferred_init);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_PREFERRED_TARGET, err));
	err = ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &preferred_init);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_VCPU_INIT, err));

	err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, vcpu_fd));

	*ps4k = ((val >> 28) & 0xf) != 0xf;
	*ps64k = ((val >> 24) & 0xf) == 0;
	*ps16k = ((val >> 20) & 0xf) != 0;

	close(vcpu_fd);
	close(vm_fd);
	close(kvm_fd);
}

/*
 * arm64 doesn't have a true default mode, so start by computing the
 * available IPA space and page sizes early.
 */
void __attribute__((constructor)) init_guest_modes(void)
{
	guest_modes_append_default();
}

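/*
 * Issue an SMCCC call via HVC: the function ID goes in w0, the arguments
 * in x1-x7, and the results come back in x0-x3.
 */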
void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res)
{
	asm volatile("mov   w0, %w[function_id]\n"
		     "mov   x1, %[arg0]\n"
		     "mov   x2, %[arg1]\n"
		     "mov   x3, %[arg2]\n"
		     "mov   x4, %[arg3]\n"
		     "mov   x5, %[arg4]\n"
		     "mov   x6, %[arg5]\n"
		     "mov   x7, %[arg6]\n"
		     "hvc   #0\n"
		     "mov   %[res0], x0\n"
		     "mov   %[res1], x1\n"
		     "mov   %[res2], x2\n"
		     "mov   %[res3], x3\n"
		     : [res0] "=r"(res->a0), [res1] "=r"(res->a1),
		       [res2] "=r"(res->a2), [res3] "=r"(res->a3)
		     : [function_id] "r"(function_id), [arg0] "r"(arg0),
		       [arg1] "r"(arg1), [arg2] "r"(arg2), [arg3] "r"(arg3),
		       [arg4] "r"(arg4), [arg5] "r"(arg5), [arg6] "r"(arg6)
		     : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7");
}