1 // SPDX-License-Identifier: GPL-2.0-only
13 #include <linux/percpu-defs.h>
25 #include <linux/psp-sev.h>
26 #include <uapi/linux/sev-guest.h>
31 #include <asm/insn-eval.h>
62 /* For early boot hypervisor communication in SEV-ES enabled guests */
74 /* #VC handler runtime per-CPU data */
88 * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
93 * This is necessary for example in the #VC->NMI->#VC case when the NMI
104 * Cached DR7 value - write it on DR7 writes and return it on reads.
106 * is currently unsupported in SEV-ES guests.
123 * per-CPU GHCB has been created and registered and thus can be
126 * For APs, the per-CPU GHCB is created before they are started
139 unsigned long sp = regs->sp; in on_vc_stack()
141 /* User-mode RSP is not trusted */ in on_vc_stack()
145 /* SYSCALL gap still has user-mode RSP */ in on_vc_stack()
177 * value below regs->sp, so that the interrupted stack frame is in __sev_es_ist_enter()
181 new_ist = regs->sp; in __sev_es_ist_enter()
187 new_ist -= sizeof(old_ist); in __sev_es_ist_enter()
209 * Nothing shall interrupt this code path while holding the per-CPU
222 ghcb = &data->ghcb_page; in __sev_get_ghcb()
224 if (unlikely(data->ghcb_active)) { in __sev_get_ghcb()
225 /* GHCB is already in use - save its contents */ in __sev_get_ghcb()
227 if (unlikely(data->backup_ghcb_active)) { in __sev_get_ghcb()
229 * Backup-GHCB is also already in use. There is no way in __sev_get_ghcb()
234 data->ghcb_active = false; in __sev_get_ghcb()
235 data->backup_ghcb_active = false; in __sev_get_ghcb()
243 data->backup_ghcb_active = true; in __sev_get_ghcb()
245 state->ghcb = &data->backup_ghcb; in __sev_get_ghcb()
248 *state->ghcb = *ghcb; in __sev_get_ghcb()
250 state->ghcb = NULL; in __sev_get_ghcb()
251 data->ghcb_active = true; in __sev_get_ghcb()
275 return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE); in vc_fetch_insn_kernel()
283 insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer); in __vc_decode_user_insn()
286 ctxt->fi.vector = X86_TRAP_PF; in __vc_decode_user_insn()
287 ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER; in __vc_decode_user_insn()
288 ctxt->fi.cr2 = ctxt->regs->ip; in __vc_decode_user_insn()
290 } else if (insn_bytes == -EINVAL) { in __vc_decode_user_insn()
292 ctxt->fi.vector = X86_TRAP_GP; in __vc_decode_user_insn()
293 ctxt->fi.error_code = 0; in __vc_decode_user_insn()
294 ctxt->fi.cr2 = 0; in __vc_decode_user_insn()
298 if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes)) in __vc_decode_user_insn()
301 if (ctxt->insn.immediate.got) in __vc_decode_user_insn()
314 ctxt->fi.vector = X86_TRAP_PF; in __vc_decode_kern_insn()
315 ctxt->fi.error_code = X86_PF_INSTR; in __vc_decode_kern_insn()
316 ctxt->fi.cr2 = ctxt->regs->ip; in __vc_decode_kern_insn()
320 ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64); in __vc_decode_kern_insn()
329 if (user_mode(ctxt->regs)) in vc_decode_insn()
347 * allowed to sleep. The page-fault handler detects that it is running in vc_write_mem()
404 if (user_mode(ctxt->regs)) in vc_write_mem()
407 ctxt->fi.vector = X86_TRAP_PF; in vc_write_mem()
408 ctxt->fi.error_code = error_code; in vc_write_mem()
409 ctxt->fi.cr2 = (unsigned long)dst; in vc_write_mem()
426 * allowed to sleep. The page-fault handler detects that it is running in vc_read_mem()
482 if (user_mode(ctxt->regs)) in vc_read_mem()
485 ctxt->fi.vector = X86_TRAP_PF; in vc_read_mem()
486 ctxt->fi.error_code = error_code; in vc_read_mem()
487 ctxt->fi.cr2 = (unsigned long)src; in vc_read_mem()
505 ctxt->fi.vector = X86_TRAP_PF; in vc_slow_virt_to_phys()
506 ctxt->fi.cr2 = vaddr; in vc_slow_virt_to_phys()
507 ctxt->fi.error_code = 0; in vc_slow_virt_to_phys()
509 if (user_mode(ctxt->regs)) in vc_slow_virt_to_phys()
510 ctxt->fi.error_code |= X86_PF_USER; in vc_slow_virt_to_phys()
531 if (user_mode(ctxt->regs)) { in vc_ioio_check()
532 struct thread_struct *t = &current->thread; in vc_ioio_check()
533 struct io_bitmap *iobm = t->io_bitmap; in vc_ioio_check()
540 if (test_bit(idx, iobm->bitmap)) in vc_ioio_check()
548 ctxt->fi.vector = X86_TRAP_GP; in vc_ioio_check()
549 ctxt->fi.error_code = 0; in vc_ioio_check()
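/*
 * Illustrative sketch (not part of sev.c): a user-space model of the port-range
 * check done by vc_ioio_check() above.  In the x86 I/O permission bitmap a set
 * bit means the port is *not* accessible, so any set bit in [port, port + size)
 * denies the whole access and the handler injects #GP.  The helper name and the
 * fixed-size bitmap below are illustrative assumptions, not kernel API.
 */
#include <limits.h>
#include <stdbool.h>

#define IO_BITMAP_BITS 65536

static bool io_range_allowed(const unsigned long *bitmap,
			     unsigned int port, unsigned int size)
{
	for (unsigned int idx = port; idx < port + size; idx++) {
		if (bitmap[idx / (sizeof(unsigned long) * CHAR_BIT)] &
		    (1UL << (idx % (sizeof(unsigned long) * CHAR_BIT))))
			return false;	/* set bit => port denied => inject #GP */
	}
	return true;
}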
554 /* Include code shared with pre-decompression boot stage */
555 #include "sev-shared.c"
565 ghcb = &data->ghcb_page; in __sev_put_ghcb()
567 if (state->ghcb) { in __sev_put_ghcb()
569 *ghcb = *state->ghcb; in __sev_put_ghcb()
570 data->backup_ghcb_active = false; in __sev_put_ghcb()
571 state->ghcb = NULL; in __sev_put_ghcb()
578 data->ghcb_active = false; in __sev_put_ghcb()
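/*
 * Illustrative sketch (not part of sev.c): a simplified model of the
 * active/backup GHCB scheme used by __sev_get_ghcb()/__sev_put_ghcb() above to
 * survive one level of nesting (e.g. #VC -> NMI -> #VC).  The first nested user
 * saves the in-use GHCB contents into the backup slot and reuses the primary
 * page; on put, the saved contents are copied back.  A nesting depth greater
 * than two is fatal in the real code.  Struct layout and names are
 * illustrative only.
 */
#include <stdbool.h>
#include <stdlib.h>

struct ghcb_model { unsigned char page[4096]; };

struct cpu_data_model {
	struct ghcb_model ghcb_page;	/* primary, normally in use      */
	struct ghcb_model backup_ghcb;	/* one spare for a nested user   */
	bool ghcb_active;
	bool backup_ghcb_active;
};

/* Returns the GHCB to use; *saved records what to restore on put (or NULL). */
static struct ghcb_model *model_get_ghcb(struct cpu_data_model *d,
					 struct ghcb_model **saved)
{
	if (d->ghcb_active) {
		if (d->backup_ghcb_active)
			abort();		/* nesting depth > 2: fatal   */
		d->backup_ghcb = d->ghcb_page;	/* save current contents      */
		d->backup_ghcb_active = true;
		*saved = &d->backup_ghcb;
	} else {
		*saved = NULL;
	}
	d->ghcb_active = true;
	return &d->ghcb_page;
}

static void model_put_ghcb(struct cpu_data_model *d, struct ghcb_model *saved)
{
	if (saved) {				/* nested use: restore outer state */
		d->ghcb_page = *saved;
		d->backup_ghcb_active = false;
	} else {				/* outermost use: mark free        */
		d->ghcb_active = false;
	}
}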
621 /* smoke-test the secrets page passed */ in get_secrets_page()
646 addr = layout->os_area.ap_jump_table_pa; in get_snp_jump_table_addr()
676 ret = ghcb->save.sw_exit_info_2; in get_jump_table_addr()
804 hdr = &data->hdr; in __set_pages_state()
805 e = data->entries; in __set_pages_state()
810 while (vaddr < vaddr_end && i < ARRAY_SIZE(data->entries)) { in __set_pages_state()
811 hdr->end_entry = i; in __set_pages_state()
821 e->gfn = pfn; in __set_pages_state()
822 e->operation = op; in __set_pages_state()
825 (vaddr_end - vaddr) >= PMD_SIZE) { in __set_pages_state()
826 e->pagesize = RMP_PG_SIZE_2M; in __set_pages_state()
829 e->pagesize = RMP_PG_SIZE_4K; in __set_pages_state()
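/*
 * Illustrative sketch (not part of sev.c): how the entry-building loop in
 * __set_pages_state() above decides between a 2M and a 4K page-state-change
 * entry and how far the cursor advances for each one.  PMD_SIZE/PAGE_SIZE
 * mirror the x86 values; the entry struct, the function name and the
 * vaddr-derived gfn (a stand-in for the physical frame lookup) are
 * illustrative placeholders.
 */
#include <stddef.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SIZE	(1UL << 21)		/* 2 MiB */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

struct psc_entry_model { unsigned long gfn; int is_2m; };

static size_t build_entries(unsigned long vaddr, unsigned long vaddr_end,
			    struct psc_entry_model *e, size_t max_entries)
{
	size_t i = 0;

	while (vaddr < vaddr_end && i < max_entries) {
		e[i].gfn = vaddr >> PAGE_SHIFT;	/* stand-in for the real pfn lookup */
		if (IS_ALIGNED(vaddr, PMD_SIZE) && (vaddr_end - vaddr) >= PMD_SIZE) {
			e[i].is_2m = 1;
			vaddr += PMD_SIZE;	/* one entry covers 512 4K pages */
		} else {
			e[i].is_2m = 0;
			vaddr += PAGE_SIZE;
		}
		i++;
	}
	return i;	/* entries used; hdr->end_entry would be i - 1 */
}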
904 npages = (end - start) >> PAGE_SHIFT; in snp_accept_memory()
944 * Allocate an 8k page which is also 8k-aligned. in snp_alloc_vmsa_page()
984 return -EOPNOTSUPP; in wakeup_cpu_via_vmgexit()
991 if (WARN_ONCE(start_ip != real_mode_header->trampoline_start, in wakeup_cpu_via_vmgexit()
993 return -EINVAL; in wakeup_cpu_via_vmgexit()
996 start_ip = real_mode_header->sev_es_trampoline_start; in wakeup_cpu_via_vmgexit()
1004 return -EINVAL; in wakeup_cpu_via_vmgexit()
1017 return -ENOMEM; in wakeup_cpu_via_vmgexit()
1024 vmsa->cs.base = sipi_vector << 12; in wakeup_cpu_via_vmgexit()
1025 vmsa->cs.limit = AP_INIT_CS_LIMIT; in wakeup_cpu_via_vmgexit()
1026 vmsa->cs.attrib = INIT_CS_ATTRIBS; in wakeup_cpu_via_vmgexit()
1027 vmsa->cs.selector = sipi_vector << 8; in wakeup_cpu_via_vmgexit()
1030 vmsa->rip = start_ip & 0xfff; in wakeup_cpu_via_vmgexit()
1033 vmsa->ds.limit = AP_INIT_DS_LIMIT; in wakeup_cpu_via_vmgexit()
1034 vmsa->ds.attrib = INIT_DS_ATTRIBS; in wakeup_cpu_via_vmgexit()
1035 vmsa->es = vmsa->ds; in wakeup_cpu_via_vmgexit()
1036 vmsa->fs = vmsa->ds; in wakeup_cpu_via_vmgexit()
1037 vmsa->gs = vmsa->ds; in wakeup_cpu_via_vmgexit()
1038 vmsa->ss = vmsa->ds; in wakeup_cpu_via_vmgexit()
1040 vmsa->gdtr.limit = AP_INIT_GDTR_LIMIT; in wakeup_cpu_via_vmgexit()
1041 vmsa->ldtr.limit = AP_INIT_LDTR_LIMIT; in wakeup_cpu_via_vmgexit()
1042 vmsa->ldtr.attrib = INIT_LDTR_ATTRIBS; in wakeup_cpu_via_vmgexit()
1043 vmsa->idtr.limit = AP_INIT_IDTR_LIMIT; in wakeup_cpu_via_vmgexit()
1044 vmsa->tr.limit = AP_INIT_TR_LIMIT; in wakeup_cpu_via_vmgexit()
1045 vmsa->tr.attrib = INIT_TR_ATTRIBS; in wakeup_cpu_via_vmgexit()
1047 vmsa->cr4 = cr4; in wakeup_cpu_via_vmgexit()
1048 vmsa->cr0 = AP_INIT_CR0_DEFAULT; in wakeup_cpu_via_vmgexit()
1049 vmsa->dr7 = DR7_RESET_VALUE; in wakeup_cpu_via_vmgexit()
1050 vmsa->dr6 = AP_INIT_DR6_DEFAULT; in wakeup_cpu_via_vmgexit()
1051 vmsa->rflags = AP_INIT_RFLAGS_DEFAULT; in wakeup_cpu_via_vmgexit()
1052 vmsa->g_pat = AP_INIT_GPAT_DEFAULT; in wakeup_cpu_via_vmgexit()
1053 vmsa->xcr0 = AP_INIT_XCR0_DEFAULT; in wakeup_cpu_via_vmgexit()
1054 vmsa->mxcsr = AP_INIT_MXCSR_DEFAULT; in wakeup_cpu_via_vmgexit()
1055 vmsa->x87_ftw = AP_INIT_X87_FTW_DEFAULT; in wakeup_cpu_via_vmgexit()
1056 vmsa->x87_fcw = AP_INIT_X87_FCW_DEFAULT; in wakeup_cpu_via_vmgexit()
1059 vmsa->efer = EFER_SVME; in wakeup_cpu_via_vmgexit()
1062 * Set the SNP-specific fields for this VMSA: in wakeup_cpu_via_vmgexit()
1066 vmsa->vmpl = 0; in wakeup_cpu_via_vmgexit()
1067 vmsa->sev_features = sev_status >> 2; in wakeup_cpu_via_vmgexit()
1075 return -EINVAL; in wakeup_cpu_via_vmgexit()
1084 ghcb_set_rax(ghcb, vmsa->sev_features); in wakeup_cpu_via_vmgexit()
1093 lower_32_bits(ghcb->save.sw_exit_info_1)) { in wakeup_cpu_via_vmgexit()
1095 ret = -EINVAL; in wakeup_cpu_via_vmgexit()
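/*
 * Illustrative sketch (not part of sev.c): the real-mode start context built in
 * wakeup_cpu_via_vmgexit() above.  With CS.base = sipi_vector << 12,
 * CS.selector = sipi_vector << 8 and RIP = start_ip & 0xfff, the AP's first
 * fetch happens at (sipi_vector << 12) + (start_ip & 0xfff), which equals
 * start_ip whenever the trampoline lies in the 4K page selected by the SIPI
 * vector.  The helper names are illustrative.
 */
#include <assert.h>

static unsigned long ap_first_fetch(unsigned int sipi_vector,
				    unsigned long start_ip)
{
	unsigned long cs_base = (unsigned long)sipi_vector << 12;
	unsigned long rip     = start_ip & 0xfff;

	return cs_base + rip;
}

static void ap_start_example(void)
{
	/* e.g. a trampoline at 0x9a000 corresponds to SIPI vector 0x9a */
	unsigned long start_ip = 0x9a000;
	unsigned int  vector   = start_ip >> 12;

	assert(ap_first_fetch(vector, start_ip) == start_ip);
}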
1144 /* Check if AP Jump Table is page-aligned */ in sev_es_setup_ap_jump_table()
1146 return -EINVAL; in sev_es_setup_ap_jump_table()
1150 startup_cs = (u16)(rmh->trampoline_start >> 4); in sev_es_setup_ap_jump_table()
1151 startup_ip = (u16)(rmh->sev_es_trampoline_start - in sev_es_setup_ap_jump_table()
1152 rmh->trampoline_start); in sev_es_setup_ap_jump_table()
1156 return -EIO; in sev_es_setup_ap_jump_table()
1168 * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu
1169 * runtime GHCBs used by the kernel are also mapped in the EFI page-table.
1186 address = __pa(&data->ghcb_page); in sev_es_efi_map_ghcbs()
1198 struct pt_regs *regs = ctxt->regs; in vc_handle_msr()
1203 exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0; in vc_handle_msr()
1205 ghcb_set_rcx(ghcb, regs->cx); in vc_handle_msr()
1207 ghcb_set_rax(ghcb, regs->ax); in vc_handle_msr()
1208 ghcb_set_rdx(ghcb, regs->dx); in vc_handle_msr()
1214 regs->ax = ghcb->save.rax; in vc_handle_msr()
1215 regs->dx = ghcb->save.rdx; in vc_handle_msr()
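/*
 * Illustrative sketch (not part of sev.c): the EDX:EAX <-> 64-bit split used by
 * vc_handle_msr() above.  WRMSR (opcode 0F 30) sends RAX/RDX to the hypervisor
 * with exit_info_1 = 1, RDMSR (opcode 0F 32) reads them back; RCX always
 * carries the MSR index.  Helper names are illustrative.
 */
#include <stdint.h>

static uint64_t msr_value_from_regs(uint32_t eax, uint32_t edx)
{
	return ((uint64_t)edx << 32) | eax;	/* high half in EDX, low in EAX */
}

static void msr_value_to_regs(uint64_t val, uint32_t *eax, uint32_t *edx)
{
	*eax = (uint32_t)val;
	*edx = (uint32_t)(val >> 32);
}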
1227 ghcb = &data->ghcb_page; in snp_register_per_cpu_ghcb()
1243 * the per-CPU GHCB page which is set up by sev_es_init_vc_handling(). in setup_ghcb()
1245 * If SNP is active, register the per-CPU GHCB page so that the runtime in setup_ghcb()
1263 /* Alright - Make the boot-ghcb public */ in setup_ghcb()
1290 ghcb->save.sw_exit_info_2) in sev_es_ap_hlt_loop()
1298 * Play_dead handler when running under SEV-ES. This is needed because
1307 /* IRQs now disabled */ in sev_es_play_dead()
1336 panic("Can't allocate SEV-ES runtime data"); in alloc_runtime_data()
1348 err = early_set_memory_decrypted((unsigned long)&data->ghcb_page, in init_ghcb()
1349 sizeof(data->ghcb_page)); in init_ghcb()
1353 memset(&data->ghcb_page, 0, sizeof(data->ghcb_page)); in init_ghcb()
1355 data->ghcb_active = false; in init_ghcb()
1356 data->backup_ghcb_active = false; in init_ghcb()
1369 panic("SEV-ES CPU Features missing"); in sev_es_init_vc_handling()
1382 /* Initialize per-cpu GHCB pages */ in sev_es_init_vc_handling()
1396 int trapnr = ctxt->fi.vector; in vc_early_forward_exception()
1399 native_write_cr2(ctxt->fi.cr2); in vc_early_forward_exception()
1401 ctxt->regs->orig_ax = ctxt->fi.error_code; in vc_early_forward_exception()
1402 do_early_exception(ctxt->regs, trapnr); in vc_early_forward_exception()
1410 reg_array = (long *)ctxt->regs; in vc_insn_get_rm()
1411 offset = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs); in vc_insn_get_rm()
1429 ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs); in vc_do_mmio()
1430 if (ref == (void __user *)-1L) in vc_do_mmio()
1438 ctxt->fi.error_code |= X86_PF_WRITE; in vc_do_mmio()
1458 * Instead of playing games with walking page-tables and trying to guess
1467 * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
1481 ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS); in vc_handle_mmio_movs()
1482 es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES); in vc_handle_mmio_movs()
1484 if (ds_base == -1L || es_base == -1L) { in vc_handle_mmio_movs()
1485 ctxt->fi.vector = X86_TRAP_GP; in vc_handle_mmio_movs()
1486 ctxt->fi.error_code = 0; in vc_handle_mmio_movs()
1490 src = ds_base + (unsigned char *)ctxt->regs->si; in vc_handle_mmio_movs()
1491 dst = es_base + (unsigned char *)ctxt->regs->di; in vc_handle_mmio_movs()
1501 if (ctxt->regs->flags & X86_EFLAGS_DF) in vc_handle_mmio_movs()
1502 off = -bytes; in vc_handle_mmio_movs()
1506 ctxt->regs->si += off; in vc_handle_mmio_movs()
1507 ctxt->regs->di += off; in vc_handle_mmio_movs()
1509 rep = insn_has_rep_prefix(&ctxt->insn); in vc_handle_mmio_movs()
1511 ctxt->regs->cx -= 1; in vc_handle_mmio_movs()
1513 if (!rep || ctxt->regs->cx == 0) in vc_handle_mmio_movs()
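/*
 * Illustrative sketch (not part of sev.c): the per-iteration register update
 * done by vc_handle_mmio_movs() above.  One element of size `bytes` is copied,
 * then RSI/RDI move forward or backward depending on EFLAGS.DF; with a REP
 * prefix RCX is decremented and the same MOVS re-faults for the next element
 * until RCX reaches zero, at which point RIP may finally advance.  The struct
 * is a stand-in for pt_regs.
 */
#include <stdbool.h>

#define X86_EFLAGS_DF (1UL << 10)	/* direction flag */

struct movs_regs { unsigned long si, di, cx, flags; };

/* Returns true when emulation of this MOVS instruction is complete. */
static bool movs_step(struct movs_regs *r, unsigned int bytes, bool rep)
{
	long off = (r->flags & X86_EFLAGS_DF) ? -(long)bytes : (long)bytes;

	r->si += off;
	r->di += off;

	if (rep)
		r->cx -= 1;

	return !rep || r->cx == 0;
}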
1521 struct insn *insn = &ctxt->insn; in vc_handle_mmio()
1533 reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs); in vc_handle_mmio()
1538 if (user_mode(ctxt->regs)) in vc_handle_mmio()
1543 memcpy(ghcb->shared_buffer, reg_data, bytes); in vc_handle_mmio()
1547 memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes); in vc_handle_mmio()
1555 /* Zero-extend for 32-bit operation */ in vc_handle_mmio()
1559 memcpy(reg_data, ghcb->shared_buffer, bytes); in vc_handle_mmio()
1567 memset(reg_data, 0, insn->opnd_bytes); in vc_handle_mmio()
1568 memcpy(reg_data, ghcb->shared_buffer, bytes); in vc_handle_mmio()
1576 u8 *val = (u8 *)ghcb->shared_buffer; in vc_handle_mmio()
1580 u16 *val = (u16 *)ghcb->shared_buffer; in vc_handle_mmio()
1586 memset(reg_data, sign_byte, insn->opnd_bytes); in vc_handle_mmio()
1587 memcpy(reg_data, ghcb->shared_buffer, bytes); in vc_handle_mmio()
1617 ctxt->fi.vector = X86_TRAP_GP; in vc_handle_dr7_write()
1618 ctxt->fi.error_code = 0; in vc_handle_dr7_write()
1625 /* Early non-zero writes to DR7 are not supported */ in vc_handle_dr7_write()
1636 data->dr7 = val; in vc_handle_dr7_write()
1654 *reg = data->dr7; in vc_handle_dr7_read()
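/*
 * Illustrative sketch (not part of sev.c): the DR7 shadow kept by the two
 * handlers above.  Because hardware debugging is unsupported in SEV-ES guests,
 * a DR7 write is only cached and a read returns the cached value, starting
 * from the architectural DR7 reset value.  The kernel keeps the shadow
 * per-CPU; a single global is used here for simplicity.
 */
#define DR7_RESET_VALUE 0x400UL		/* architectural reset value of DR7 */

static unsigned long dr7_shadow = DR7_RESET_VALUE;

static void dr7_write_emulated(unsigned long val)
{
	dr7_shadow = val;		/* never reaches real hardware */
}

static unsigned long dr7_read_emulated(void)
{
	return dr7_shadow;
}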
1671 ghcb_set_rcx(ghcb, ctxt->regs->cx); in vc_handle_rdpmc()
1680 ctxt->regs->ax = ghcb->save.rax; in vc_handle_rdpmc()
1681 ctxt->regs->dx = ghcb->save.rdx; in vc_handle_rdpmc()
1708 ghcb_set_rax(ghcb, ctxt->regs->ax); in vc_handle_vmmcall()
1709 ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0); in vc_handle_vmmcall()
1712 x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs); in vc_handle_vmmcall()
1721 ctxt->regs->ax = ghcb->save.rax; in vc_handle_vmmcall()
1724 * Call sev_es_hcall_finish() after regs->ax is already set. in vc_handle_vmmcall()
1729 !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs)) in vc_handle_vmmcall()
1743 ctxt->fi.vector = X86_TRAP_AC; in vc_handle_trap_ac()
1744 ctxt->fi.error_code = 0; in vc_handle_trap_ac()
1811 long error_code = ctxt->fi.error_code; in vc_forward_exception()
1812 int trapnr = ctxt->fi.vector; in vc_forward_exception()
1814 ctxt->regs->orig_ax = ctxt->fi.error_code; in vc_forward_exception()
1818 exc_general_protection(ctxt->regs, error_code); in vc_forward_exception()
1821 exc_invalid_op(ctxt->regs); in vc_forward_exception()
1824 write_cr2(ctxt->fi.cr2); in vc_forward_exception()
1825 exc_page_fault(ctxt->regs, error_code); in vc_forward_exception()
1828 exc_alignment_check(ctxt->regs, error_code); in vc_forward_exception()
1831 pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n"); in vc_forward_exception()
1846 prev_sp = regs->sp; in vc_from_invalid_context()
1874 /* Done - now check the result */ in vc_raw_handle_exception()
1880 pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n", in vc_raw_handle_exception()
1881 error_code, regs->ip); in vc_raw_handle_exception()
1885 pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n", in vc_raw_handle_exception()
1886 error_code, regs->ip); in vc_raw_handle_exception()
1890 pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n", in vc_raw_handle_exception()
1891 error_code, regs->ip); in vc_raw_handle_exception()
1904 * failed - can't continue so print debug information in vc_raw_handle_exception()
1928 * intercepted instructions or accesses to MMIO areas/IO ports. They can in DEFINE_IDTENTRY_VC_KERNEL()
1961 /* If that fails and we get here - just panic */ in DEFINE_IDTENTRY_VC_KERNEL()
1962 panic("Returned from Terminate-Request to Hypervisor\n"); in DEFINE_IDTENTRY_VC_KERNEL()
1988 * Do not kill the machine if user-space triggered the in DEFINE_IDTENTRY_VC_USER()
1989 * exception. Send SIGBUS instead and let user-space deal with in DEFINE_IDTENTRY_VC_USER()
2001 unsigned long exit_code = regs->orig_ax; in handle_vc_boot_ghcb()
2011 /* Done - now check the result */ in handle_vc_boot_ghcb()
2017 early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n", in handle_vc_boot_ghcb()
2018 exit_code, regs->ip); in handle_vc_boot_ghcb()
2021 early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n", in handle_vc_boot_ghcb()
2022 exit_code, regs->ip); in handle_vc_boot_ghcb()
2025 early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n", in handle_vc_boot_ghcb()
2026 exit_code, regs->ip); in handle_vc_boot_ghcb()
2051 * - when booted via the boot/decompress kernel:
2052 * - via boot_params
2054 * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
2055 * - via a setup_data entry, as defined by the Linux Boot Protocol
2059 static __init struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp) in find_cc_blob() argument
2064 if (bp->cc_blob_address) { in find_cc_blob()
2065 cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address; in find_cc_blob()
2074 cc_info = find_cc_blob_setup_data(bp); in find_cc_blob()
2079 if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC) in find_cc_blob()
2085 bool __init snp_init(struct boot_params *bp) in snp_init() argument
2089 if (!bp) in snp_init()
2092 cc_info = find_cc_blob(bp); in snp_init()
2102 bp->cc_blob_address = (u32)(unsigned long)cc_info; in snp_init()
2118 cpuid_table->count, cpuid_table->__reserved1, cpuid_table->__reserved2); in dump_cpuid_table()
2121 const struct snp_cpuid_fn *fn = &cpuid_table->fn[i]; in dump_cpuid_table()
2124 i, fn->eax_in, fn->ecx_in, fn->eax, fn->ebx, fn->ecx, in dump_cpuid_table()
2125 fn->edx, fn->xcr0_in, fn->xss_in, fn->__reserved); in dump_cpuid_table()
2140 if (!cpuid_table->count) in report_cpuid_table()
2144 cpuid_table->count); in report_cpuid_table()
2163 pr_info("SEV command-line option '%s' was not recognized\n", s); in init_sev_config()
2178 rio->exitinfo2 = SEV_RET_NO_FW_CALL; in snp_issue_guest_request()
2181 * __sev_get_ghcb() needs to run with IRQs disabled because it is using in snp_issue_guest_request()
2182 * a per-CPU GHCB. in snp_issue_guest_request()
2188 ret = -EIO; in snp_issue_guest_request()
2195 ghcb_set_rax(ghcb, input->data_gpa); in snp_issue_guest_request()
2196 ghcb_set_rbx(ghcb, input->data_npages); in snp_issue_guest_request()
2199 ret = sev_es_ghcb_hv_call(ghcb, &ctxt, exit_code, input->req_gpa, input->resp_gpa); in snp_issue_guest_request()
2203 rio->exitinfo2 = ghcb->save.sw_exit_info_2; in snp_issue_guest_request()
2204 switch (rio->exitinfo2) { in snp_issue_guest_request()
2209 ret = -EAGAIN; in snp_issue_guest_request()
2215 input->data_npages = ghcb_get_rbx(ghcb); in snp_issue_guest_request()
2216 ret = -ENOSPC; in snp_issue_guest_request()
2221 ret = -EIO; in snp_issue_guest_request()
2235 .name = "sev-guest",
2236 .id = -1,
2245 return -ENODEV; in snp_init_platform_device()
2249 return -ENODEV; in snp_init_platform_device()
2253 return -ENODEV; in snp_init_platform_device()
2256 return -ENODEV; in snp_init_platform_device()