// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2022 Intel Corporation */

#undef pr_fmt
#define pr_fmt(fmt) "tdx: " fmt

#include <linux/cpufeature.h>
#include <linux/export.h>
#include <linux/io.h>
#include <asm/coco.h>
#include <asm/tdx.h>
#include <asm/vmx.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/pgtable.h>

/* MMIO direction */
#define EPT_READ	0
#define EPT_WRITE	1

/* Port I/O direction */
#define PORT_READ	0
#define PORT_WRITE	1

/* See Exit Qualification for I/O Instructions in VMX documentation */
#define VE_IS_IO_IN(e)		((e) & BIT(3))
#define VE_GET_IO_SIZE(e)	(((e) & GENMASK(2, 0)) + 1)
#define VE_GET_PORT_NUM(e)	((e) >> 16)
#define VE_IS_IO_STRING(e)	((e) & BIT(4))
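
/*
 * Worked example using the macros above (illustrative value, not from the
 * spec): exit_qual == 0x03f80008 decodes as a 1-byte, non-string IN from
 * port 0x3f8 -- bit 3 set selects IN, size bits 2:0 are 0 (1 byte),
 * bit 4 is clear (not a string op) and bits 31:16 hold the port number.
 */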

#define ATTR_DEBUG		BIT(0)
#define ATTR_SEPT_VE_DISABLE	BIT(28)

/* TDX Module call error codes */
#define TDCALL_RETURN_CODE(a)	((a) >> 32)
#define TDCALL_INVALID_OPERAND	0xc0000100

#define TDREPORT_SUBTYPE_0	0

/* Called from __tdx_hypercall() for unrecoverable failure */
noinstr void __tdx_hypercall_failed(void)
{
	instrumentation_begin();
	panic("TDVMCALL failed. TDX module bug?");
}

#ifdef CONFIG_KVM_GUEST
long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
		       unsigned long p3, unsigned long p4)
{
	struct tdx_hypercall_args args = {
		.r10 = nr,
		.r11 = p1,
		.r12 = p2,
		.r13 = p3,
		.r14 = p4,
	};

	return __tdx_hypercall(&args);
}
EXPORT_SYMBOL_GPL(tdx_kvm_hypercall);
#endif
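
/*
 * Note: with X86_FEATURE_TDX_GUEST set, the kvm_hypercall*() helpers in
 * asm/kvm_para.h dispatch to tdx_kvm_hypercall() above, so KVM guest
 * hypercalls go out via TDVMCALL rather than the native VMCALL path.
 */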

/*
 * Used for TDX guests to make calls directly to the TD module. This
 * should only be used for calls that have no legitimate reason to fail
 * or where the kernel can not survive the call failing.
 */
static inline void tdx_module_call(u64 fn, u64 rcx, u64 rdx, u64 r8, u64 r9,
				   struct tdx_module_output *out)
{
	if (__tdx_module_call(fn, rcx, rdx, r8, r9, out))
		panic("TDCALL %lld failed (Buggy TDX module!)\n", fn);
}

/**
 * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT
 *                           subtype 0) using TDG.MR.REPORT TDCALL.
 * @reportdata: Address of the input buffer which contains user-defined
 *              REPORTDATA to be included into TDREPORT.
 * @tdreport: Address of the output buffer to store TDREPORT.
 *
 * Refer to section titled "TDG.MR.REPORT leaf" in the TDX Module
 * v1.0 specification for more information on TDG.MR.REPORT TDCALL.
 * It is used in the TDX guest driver module to get the TDREPORT0.
 *
 * Return 0 on success, -EINVAL for invalid operands, or -EIO on
 * other TDCALL failures.
 */
int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
{
	u64 ret;

	ret = __tdx_module_call(TDX_GET_REPORT, virt_to_phys(tdreport),
				virt_to_phys(reportdata), TDREPORT_SUBTYPE_0,
				0, NULL);
	if (ret) {
		if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND)
			return -EINVAL;
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tdx_mcall_get_report0);
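
/*
 * Usage note: callers such as the TDX guest attestation driver pass a
 * 64-byte REPORTDATA buffer and a 1024-byte TDREPORT buffer
 * (TDX_REPORTDATA_LEN and TDX_REPORT_LEN in the tdx-guest UAPI header);
 * both must be physically contiguous, e.g. kmalloc()-backed, since they
 * are handed to the TDX module by physical address.
 */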

static void __noreturn tdx_panic(const char *msg)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = TDVMCALL_REPORT_FATAL_ERROR,
		.r12 = 0, /* Error code: 0 is Panic */
	};
	union {
		/* Define register order according to the GHCI */
		struct { u64 r14, r15, rbx, rdi, rsi, r8, r9, rdx; };

		char str[64];
	} message;

	/* VMM assumes '\0' in byte 65, if the message took all 64 bytes */
	strncpy(message.str, msg, 64);

	args.r8  = message.r8;
	args.r9  = message.r9;
	args.r14 = message.r14;
	args.r15 = message.r15;
	args.rdi = message.rdi;
	args.rsi = message.rsi;
	args.rbx = message.rbx;
	args.rdx = message.rdx;

	/*
	 * This hypercall should never return and it is not safe
	 * to keep the guest running. Call it forever if it
	 * happens to return.
	 */
	while (1)
		__tdx_hypercall(&args);
}

static void tdx_parse_tdinfo(u64 *cc_mask)
{
	struct tdx_module_output out;
	unsigned int gpa_width;
	u64 td_attr;

	/*
	 * TDINFO TDX module call is used to get the TD execution environment
	 * information like GPA width, number of available vcpus, debug mode
	 * information, etc. More details about the ABI can be found in TDX
	 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL
	 * [TDG.VP.INFO].
	 */
	tdx_module_call(TDX_GET_INFO, 0, 0, 0, 0, &out);

	/*
	 * The highest bit of a guest physical address is the "sharing" bit.
	 * Set it for shared pages and clear it for private pages.
	 *
	 * The GPA width that comes out of this call is critical. TDX guests
	 * can not meaningfully run without it.
	 */
	gpa_width = out.rcx & GENMASK(5, 0);
	*cc_mask = BIT_ULL(gpa_width - 1);
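
	/*
	 * Worked example: for the common gpa_width of 52, cc_mask is
	 * BIT_ULL(51), i.e. GPA bit 51 acts as the shared bit.
	 */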

	/*
	 * The kernel can not handle #VE's when accessing normal kernel
	 * memory. Ensure that no #VE will be delivered for accesses to
	 * TD-private memory. Only VMM-shared memory (MMIO) will #VE.
	 */
	td_attr = out.rdx;
	if (!(td_attr & ATTR_SEPT_VE_DISABLE)) {
		const char *msg = "TD misconfiguration: SEPT_VE_DISABLE attribute must be set.";

		/* Relax SEPT_VE_DISABLE check for debug TD. */
		if (td_attr & ATTR_DEBUG)
			pr_warn("%s\n", msg);
		else
			tdx_panic(msg);
	}
}

/*
 * The TDX module spec states that #VE may be injected for a limited set of
 * reasons:
 *
 *  - Emulation of the architectural #VE injection on EPT violation;
 *
 *  - As a result of guest TD execution of a disallowed instruction,
 *    a disallowed MSR access, or CPUID virtualization;
 *
 *  - A notification to the guest TD about anomalous behavior;
 *
 * The last one is opt-in and is not used by the kernel.
 *
 * The Intel Software Developer's Manual describes cases when instruction
 * length field can be used in section "Information for VM Exits Due to
 * Instruction Execution".
 *
 * For TDX, it ultimately means GET_VEINFO provides reliable instruction length
 * information if #VE occurred due to instruction execution, but not for EPT
 * violations.
 */
static int ve_instr_len(struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
	case EXIT_REASON_CPUID:
	case EXIT_REASON_IO_INSTRUCTION:
		/* It is safe to use ve->instr_len for #VE due to instructions */
		return ve->instr_len;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * For EPT violations, ve->insn_len is not defined. For those,
		 * the kernel must decode instructions manually and should not
		 * be using this function.
		 */
		WARN_ONCE(1, "ve->instr_len is not defined for EPT violations");
		return 0;
	default:
		WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason);
		return ve->instr_len;
	}
}

static u64 __cpuidle __halt(const bool irq_disabled)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_HLT),
		.r12 = irq_disabled,
	};

	/*
	 * Emulate HLT operation via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section 3.8 TDG.VP.VMCALL<Instruction.HLT>.
	 *
	 * The VMM uses the "IRQ disabled" param to understand IRQ
	 * enabled status (RFLAGS.IF) of the TD guest and to determine
	 * whether or not it should schedule the halted vCPU if an
	 * IRQ becomes pending. E.g. if IRQs are disabled, the VMM
	 * can keep the vCPU in virtual HLT, even if an IRQ is
	 * pending, without hanging/breaking the guest.
	 */
	return __tdx_hypercall(&args);
}

static int handle_halt(struct ve_info *ve)
{
	const bool irq_disabled = irqs_disabled();

	if (__halt(irq_disabled))
		return -EIO;

	return ve_instr_len(ve);
}

void __cpuidle tdx_safe_halt(void)
{
	const bool irq_disabled = false;

	/*
	 * Use WARN_ONCE() to report the failure.
	 */
	if (__halt(irq_disabled))
		WARN_ONCE(1, "HLT instruction emulation failed\n");
}

static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_READ),
		.r12 = regs->cx,
	};

	/*
	 * Emulate the MSR read via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "TDG.VP.VMCALL<Instruction.RDMSR>".
	 */
	if (__tdx_hypercall_ret(&args))
		return -EIO;

	regs->ax = lower_32_bits(args.r11);
	regs->dx = upper_32_bits(args.r11);
	return ve_instr_len(ve);
}

static int write_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_WRITE),
		.r12 = regs->cx,
		.r13 = (u64)regs->dx << 32 | regs->ax,
	};

	/*
	 * Emulate the MSR write via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI) section titled "TDG.VP.VMCALL<Instruction.WRMSR>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	return ve_instr_len(ve);
}

static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_CPUID),
		.r12 = regs->ax,
		.r13 = regs->cx,
	};

	/*
	 * Only allow VMM to control range reserved for hypervisor
	 * communication.
	 *
	 * Return all-zeros for any CPUID outside the range. It matches CPU
	 * behaviour for non-supported leaves.
	 */
	if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) {
		regs->ax = regs->bx = regs->cx = regs->dx = 0;
		return ve_instr_len(ve);
	}

	/*
	 * Emulate the CPUID instruction via a hypercall. More info about
	 * ABI can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "VP.VMCALL<Instruction.CPUID>".
	 */
	if (__tdx_hypercall_ret(&args))
		return -EIO;

	/*
	 * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of
	 * EAX, EBX, ECX, EDX registers after the CPUID instruction execution.
	 * So copy the register contents back to pt_regs.
	 */
	regs->ax = args.r12;
	regs->bx = args.r13;
	regs->cx = args.r14;
	regs->dx = args.r15;

	return ve_instr_len(ve);
}

static bool mmio_read(int size, unsigned long addr, unsigned long *val)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_EPT_VIOLATION),
		.r12 = size,
		.r13 = EPT_READ,
		.r14 = addr,
		.r15 = *val,
	};

	if (__tdx_hypercall_ret(&args))
		return false;
	*val = args.r11;
	return true;
}

static bool mmio_write(int size, unsigned long addr, unsigned long val)
{
	return !_tdx_hypercall(hcall_func(EXIT_REASON_EPT_VIOLATION), size,
			       EPT_WRITE, addr, val);
}

static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
{
	unsigned long *reg, val, vaddr;
	char buffer[MAX_INSN_SIZE];
	enum insn_mmio_type mmio;
	struct insn insn = {};
	int size, extend_size;
	u8 extend_val = 0;

	/* Only in-kernel MMIO is supported */
	if (WARN_ON_ONCE(user_mode(regs)))
		return -EFAULT;

	if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE))
		return -EFAULT;

	if (insn_decode(&insn, buffer, MAX_INSN_SIZE, INSN_MODE_64))
		return -EINVAL;

	mmio = insn_decode_mmio(&insn, &size);
	if (WARN_ON_ONCE(mmio == INSN_MMIO_DECODE_FAILED))
		return -EINVAL;

	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
		reg = insn_get_modrm_reg_ptr(&insn, regs);
		if (!reg)
			return -EINVAL;
	}

	/*
	 * Reject EPT violation #VEs that split pages.
	 *
	 * MMIO accesses are supposed to be naturally aligned and therefore
	 * never cross page boundaries. Seeing split page accesses indicates
	 * a bug or a load_unaligned_zeropad() that stepped into an MMIO page.
	 *
	 * load_unaligned_zeropad() will recover using exception fixups.
	 */
	vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
		return -EFAULT;
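
	/*
	 * Example of the check above: a 4-byte access at page offset 0xffd
	 * has its last byte at offset 0x1000, i.e. on the next page, and is
	 * rejected.
	 */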

	/* Handle writes first */
	switch (mmio) {
	case INSN_MMIO_WRITE:
		memcpy(&val, reg, size);
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_WRITE_IMM:
		val = insn.immediate.value;
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_READ:
	case INSN_MMIO_READ_ZERO_EXTEND:
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Reads are handled below */
		break;
	case INSN_MMIO_MOVS:
	case INSN_MMIO_DECODE_FAILED:
		/*
		 * MMIO was accessed with an instruction that could not be
		 * decoded or handled properly. It was likely not using io.h
		 * helpers or accessed MMIO accidentally.
		 */
		return -EINVAL;
	default:
		WARN_ONCE(1, "Unknown insn_decode_mmio() decode value?");
		return -EINVAL;
	}

	/* Handle reads */
	if (!mmio_read(size, ve->gpa, &val))
		return -EIO;

	switch (mmio) {
	case INSN_MMIO_READ:
		/* Zero-extend for 32-bit operation */
		extend_size = size == 4 ? sizeof(*reg) : 0;
		break;
	case INSN_MMIO_READ_ZERO_EXTEND:
		/* Zero extend based on operand size */
		extend_size = insn.opnd_bytes;
		break;
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Sign extend based on operand size */
		extend_size = insn.opnd_bytes;
		if (size == 1 && val & BIT(7))
			extend_val = 0xFF;
		else if (size > 1 && val & BIT(15))
			extend_val = 0xFF;
		break;
	default:
		/* All other cases have to be covered with the first switch() */
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
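
	/*
	 * Example: "movzwl (%rdx),%eax" is a 2-byte zero-extending read into
	 * a 32-bit operand, so size is 2 and extend_size is 4: the
	 * memset()/memcpy() pair below zero-fills the four destination bytes
	 * and then copies in the two bytes that were read.
	 */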

	if (extend_size)
		memset(reg, extend_val, extend_size);
	memcpy(reg, &val, size);
	return insn.length;
}

static bool handle_in(struct pt_regs *regs, int size, int port)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
		.r12 = size,
		.r13 = PORT_READ,
		.r14 = port,
	};
	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);
	bool success;

	/*
	 * Emulate the I/O read via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	success = !__tdx_hypercall_ret(&args);

	/* Update part of the register affected by the emulated instruction */
	regs->ax &= ~mask;
	if (success)
		regs->ax |= args.r11 & mask;

	return success;
}

static bool handle_out(struct pt_regs *regs, int size, int port)
{
	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);

	/*
	 * Emulate the I/O write via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	return !_tdx_hypercall(hcall_func(EXIT_REASON_IO_INSTRUCTION), size,
			       PORT_WRITE, port, regs->ax & mask);
}

/*
 * Emulate I/O using hypercall.
 *
 * Assumes the IO instruction was using ax, which is enforced
 * by the standard io.h macros.
 *
 * Return the instruction length on success, or -errno on failure.
 */
static int handle_io(struct pt_regs *regs, struct ve_info *ve)
{
	u32 exit_qual = ve->exit_qual;
	int size, port;
	bool in, ret;

	if (VE_IS_IO_STRING(exit_qual))
		return -EIO;

	in = VE_IS_IO_IN(exit_qual);
	size = VE_GET_IO_SIZE(exit_qual);
	port = VE_GET_PORT_NUM(exit_qual);

	if (in)
		ret = handle_in(regs, size, port);
	else
		ret = handle_out(regs, size, port);
	if (!ret)
		return -EIO;

	return ve_instr_len(ve);
}

/*
 * Early #VE exception handler. Only handles a subset of port I/O.
 * Intended only for earlyprintk. If it fails, return false.
 */
__init bool tdx_early_handle_ve(struct pt_regs *regs)
{
	struct ve_info ve;
	int insn_len;

	tdx_get_ve_info(&ve);

	if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION)
		return false;

	insn_len = handle_io(regs, &ve);
	if (insn_len < 0)
		return false;

	regs->ip += insn_len;
	return true;
}

void tdx_get_ve_info(struct ve_info *ve)
{
	struct tdx_module_output out;

	/*
	 * Called during #VE handling to retrieve the #VE info from the
	 * TDX module.
	 *
	 * This has to be called early in #VE handling. A "nested" #VE which
	 * occurs before this will raise a #DF and is not recoverable.
	 *
	 * The call retrieves the #VE info from the TDX module, which also
	 * clears the "#VE valid" flag. This must be done before anything else
	 * because any #VE that occurs while the valid flag is set will lead to
	 * #DF.
	 *
	 * Note, the TDX module treats virtual NMIs as inhibited if the #VE
	 * valid flag is set. It means that NMI=>#VE will not result in a #DF.
	 */
	tdx_module_call(TDX_GET_VEINFO, 0, 0, 0, 0, &out);

	/* Transfer the output parameters */
	ve->exit_reason = out.rcx;
	ve->exit_qual = out.rdx;
	ve->gla = out.r8;
	ve->gpa = out.r9;
	ve->instr_len = lower_32_bits(out.r10);
	ve->instr_info = upper_32_bits(out.r10);
}

/*
 * Handle the user initiated #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

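/*
 * cc_mkenc() clears the shared bit for CC_VENDOR_INTEL, so the comparison
 * below holds exactly when the cc_mask "shared" bit is already clear,
 * i.e. when the GPA refers to TD-private memory.
 */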
static inline bool is_private_gpa(u64 gpa)
{
	return gpa == cc_mkenc(gpa);
}

/*
 * Handle the kernel #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
		return handle_halt(ve);
	case EXIT_REASON_MSR_READ:
		return read_msr(regs, ve);
	case EXIT_REASON_MSR_WRITE:
		return write_msr(regs, ve);
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	case EXIT_REASON_EPT_VIOLATION:
		if (is_private_gpa(ve->gpa))
			panic("Unexpected EPT-violation on private memory.");
		return handle_mmio(regs, ve);
	case EXIT_REASON_IO_INSTRUCTION:
		return handle_io(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve)
{
	int insn_len;

	if (user_mode(regs))
		insn_len = virt_exception_user(regs, ve);
	else
		insn_len = virt_exception_kernel(regs, ve);
	if (insn_len < 0)
		return false;

	/* After successful #VE handling, move the IP */
	regs->ip += insn_len;

	return true;
}

static bool tdx_tlb_flush_required(bool private)
{
	/*
	 * TDX guest is responsible for flushing TLB on private->shared
	 * transition. VMM is responsible for flushing on shared->private.
	 *
	 * The VMM _can't_ flush private addresses as it can't generate PAs
	 * with the guest's HKID. Shared memory isn't subject to integrity
	 * checking, i.e. the VMM doesn't need to flush for its own protection.
	 *
	 * There's no need to flush when converting from shared to private,
	 * as flushing is the VMM's responsibility in this case, e.g. it must
	 * flush to avoid integrity failures in the face of a buggy or
	 * malicious guest.
	 */
	return !private;
}

static bool tdx_cache_flush_required(void)
{
	/*
	 * AMD SME/SEV can avoid cache flushing if HW enforces cache coherence.
	 * TDX doesn't have such capability.
	 *
	 * Flush cache unconditionally.
	 */
	return true;
}

/*
 * Inform the VMM of the guest's intent for this physical page: shared with
 * the VMM or private to the guest. The VMM is expected to change its mapping
 * of the page in response.
 */
static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
{
	phys_addr_t start = __pa(vaddr);
	phys_addr_t end = __pa(vaddr + numpages * PAGE_SIZE);

	if (!enc) {
		/* Set the shared (decrypted) bits: */
		start |= cc_mkdec(0);
		end |= cc_mkdec(0);
	}

	/*
	 * Notify the VMM about page mapping conversion. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface (GHCI),
	 * section "TDG.VP.VMCALL<MapGPA>"
	 */
	if (_tdx_hypercall(TDVMCALL_MAP_GPA, start, end - start, 0, 0))
		return false;

	/* shared->private conversion requires memory to be accepted before use */
	if (enc)
		return tdx_accept_memory(start, end);

	return true;
}

static bool tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
					  bool enc)
{
	/*
	 * Only handle shared->private conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (enc)
		return tdx_enc_status_changed(vaddr, numpages, enc);
	return true;
}

static bool tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
					 bool enc)
{
	/*
	 * Only handle private->shared conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (!enc)
		return tdx_enc_status_changed(vaddr, numpages, enc);
	return true;
}
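
/*
 * These two hooks are invoked from the set_memory_decrypted() and
 * set_memory_encrypted() paths (__set_memory_enc_pgtable()): _prepare runs
 * before the kernel page table update and _finish runs after it, which
 * gives the ordering described in the comment in tdx_early_init() below.
 */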

void __init tdx_early_init(void)
{
	u64 cc_mask;
	u32 eax, sig[3];

	cpuid_count(TDX_CPUID_LEAF_ID, 0, &eax, &sig[0], &sig[2], &sig[1]);

	if (memcmp(TDX_IDENT, sig, sizeof(sig)))
		return;

	setup_force_cpu_cap(X86_FEATURE_TDX_GUEST);

	cc_vendor = CC_VENDOR_INTEL;
	tdx_parse_tdinfo(&cc_mask);
	cc_set_mask(cc_mask);

	/* Kernel does not use NOTIFY_ENABLES and does not need random #VEs */
	tdx_module_call(TDX_WR, 0, TDCS_NOTIFY_ENABLES, 0, -1ULL, NULL);
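	/*
	 * Per the TDG.VM.WR ABI, the arguments above land in rdx (field id:
	 * TDCS_NOTIFY_ENABLES), r8 (new value: 0) and r9 (write mask: -1ULL),
	 * clearing every NOTIFY_ENABLES bit.
	 */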

	/*
	 * All bits above GPA width are reserved and kernel treats shared bit
	 * as flag, not as part of physical address.
	 *
	 * Adjust physical mask to only cover valid GPA bits.
	 */
	physical_mask &= cc_mask - 1;
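
	/*
	 * Example: with cc_mask == BIT_ULL(51), "cc_mask - 1" keeps bits 50:0,
	 * so the shared bit and all reserved bits above it drop out of
	 * physical_mask.
	 */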

	/*
	 * The kernel mapping should match the TDX metadata for the page.
	 * load_unaligned_zeropad() can touch memory *adjacent* to that which is
	 * owned by the caller and can catch even _momentary_ mismatches. Bad
	 * things happen on mismatch:
	 *
	 *   - Private mapping => Shared Page  == Guest shutdown
	 *   - Shared mapping  => Private Page == Recoverable #VE
	 *
	 * guest.enc_status_change_prepare() converts the page from
	 * shared=>private before the mapping becomes private.
	 *
	 * guest.enc_status_change_finish() converts the page from
	 * private=>shared after the mapping becomes shared.
	 *
	 * In both cases there is a temporary shared mapping to a private page,
	 * which can result in a #VE. But, there is never a private mapping to
	 * a shared page.
	 */
	x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare;
	x86_platform.guest.enc_status_change_finish = tdx_enc_status_change_finish;

	x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required = tdx_tlb_flush_required;

	/*
	 * TDX intercepts the RDMSR to read the X2APIC ID in the parallel
	 * bringup low level code. That raises #VE which cannot be handled
	 * there.
	 *
	 * Intel-TDX has a secure RDMSR hypercall, but that needs to be
	 * implemented separately in the low level startup ASM code.
	 * Until that is in place, disable parallel bringup for TDX.
	 */
	x86_cpuinit.parallel_bringup = false;

	pr_info("Guest detected\n");
}