xref: /linux/arch/x86/coco/tdx/tdx.c (revision d4fc4d01)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2022 Intel Corporation */

#undef pr_fmt
#define pr_fmt(fmt)     "tdx: " fmt

#include <linux/cpufeature.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <asm/coco.h>
#include <asm/tdx.h>
#include <asm/vmx.h>
#include <asm/ia32.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/traps.h>

/* MMIO direction */
#define EPT_READ	0
#define EPT_WRITE	1

/* Port I/O direction */
#define PORT_READ	0
#define PORT_WRITE	1

/* See Exit Qualification for I/O Instructions in VMX documentation */
#define VE_IS_IO_IN(e)		((e) & BIT(3))
#define VE_GET_IO_SIZE(e)	(((e) & GENMASK(2, 0)) + 1)
#define VE_GET_PORT_NUM(e)	((e) >> 16)
#define VE_IS_IO_STRING(e)	((e) & BIT(4))

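/*
 * Worked example of the decoding above (an illustrative sketch): a
 * 1-byte "out %al, $0x3f8" that takes a #VE produces an exit
 * qualification of 0x03f80000:
 *
 *   VE_GET_IO_SIZE(0x03f80000)  == (0 & GENMASK(2, 0)) + 1 == 1 byte
 *   VE_IS_IO_IN(0x03f80000)     == 0 (bit 3 clear => OUT)
 *   VE_IS_IO_STRING(0x03f80000) == 0 (bit 4 clear => not INS/OUTS)
 *   VE_GET_PORT_NUM(0x03f80000) == 0x3f8
 */
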
#define ATTR_DEBUG		BIT(0)
#define ATTR_SEPT_VE_DISABLE	BIT(28)

/* TDX Module call error codes */
#define TDCALL_RETURN_CODE(a)	((a) >> 32)
#define TDCALL_INVALID_OPERAND	0xc0000100

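/*
 * For example, a hypothetical raw TDCALL status of 0xc000010000000000
 * carries its class/detail code in the upper 32 bits:
 * TDCALL_RETURN_CODE(0xc000010000000000) == 0xc0000100, i.e.
 * TDCALL_INVALID_OPERAND.
 */
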
#define TDREPORT_SUBTYPE_0	0

static atomic_long_t nr_shared;

/* Called from __tdx_hypercall() for unrecoverable failure */
noinstr void __noreturn __tdx_hypercall_failed(void)
{
	instrumentation_begin();
	panic("TDVMCALL failed. TDX module bug?");
}

#ifdef CONFIG_KVM_GUEST
long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
		       unsigned long p3, unsigned long p4)
{
	struct tdx_module_args args = {
		.r10 = nr,
		.r11 = p1,
		.r12 = p2,
		.r13 = p3,
		.r14 = p4,
	};

	return __tdx_hypercall(&args);
}
EXPORT_SYMBOL_GPL(tdx_kvm_hypercall);
#endif

/*
 * Used for TDX guests to make calls directly to the TD module.  This
 * should only be used for calls that have no legitimate reason to fail
 * or where the kernel can not survive the call failing.
 */
static inline void tdcall(u64 fn, struct tdx_module_args *args)
{
	if (__tdcall_ret(fn, args))
		panic("TDCALL %lld failed (Buggy TDX module!)\n", fn);
}

/**
 * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT
 *                           subtype 0) using TDG.MR.REPORT TDCALL.
 * @reportdata: Address of the input buffer which contains user-defined
 *              REPORTDATA to be included into TDREPORT.
 * @tdreport: Address of the output buffer to store TDREPORT.
 *
 * Refer to section titled "TDG.MR.REPORT leaf" in the TDX Module
 * v1.0 specification for more information on TDG.MR.REPORT TDCALL.
 * It is used in the TDX guest driver module to get the TDREPORT0.
 *
 * Return 0 on success, -EINVAL for invalid operands, or -EIO on
 * other TDCALL failures.
 */
int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
{
	struct tdx_module_args args = {
		.rcx = virt_to_phys(tdreport),
		.rdx = virt_to_phys(reportdata),
		.r8 = TDREPORT_SUBTYPE_0,
	};
	u64 ret;

	ret = __tdcall(TDG_MR_REPORT, &args);
	if (ret) {
		if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND)
			return -EINVAL;
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tdx_mcall_get_report0);

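/*
 * Minimal caller sketch (buffer sizes assumed from the TDX Module v1.0
 * spec: a 64-byte REPORTDATA input and a 1024-byte TDREPORT output,
 * allocated and suitably aligned by the caller):
 *
 *	u8 *reportdata = kzalloc(64, GFP_KERNEL);
 *	u8 *tdreport = kzalloc(1024, GFP_KERNEL);
 *
 *	memcpy(reportdata, nonce, 64);
 *	ret = tdx_mcall_get_report0(reportdata, tdreport);
 *	if (!ret)
 *		... tdreport now holds TDREPORT0, e.g. for quote generation ...
 */
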
115 /**
116  * tdx_hcall_get_quote() - Wrapper to request TD Quote using GetQuote
117  *                         hypercall.
118  * @buf: Address of the directly mapped shared kernel buffer which
119  *       contains TDREPORT. The same buffer will be used by VMM to
120  *       store the generated TD Quote output.
121  * @size: size of the tdquote buffer (4KB-aligned).
122  *
123  * Refer to section titled "TDG.VP.VMCALL<GetQuote>" in the TDX GHCI
124  * v1.0 specification for more information on GetQuote hypercall.
125  * It is used in the TDX guest driver module to get the TD Quote.
126  *
127  * Return 0 on success or error code on failure.
128  */
tdx_hcall_get_quote(u8 * buf,size_t size)129 u64 tdx_hcall_get_quote(u8 *buf, size_t size)
130 {
131 	/* Since buf is a shared memory, set the shared (decrypted) bits */
132 	return _tdx_hypercall(TDVMCALL_GET_QUOTE, cc_mkdec(virt_to_phys(buf)), size, 0, 0);
133 }
134 EXPORT_SYMBOL_GPL(tdx_hcall_get_quote);
135 
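/*
 * Note: per the GHCI, GetQuote is asynchronous. A successful return here
 * only means the request was accepted; the VMM updates a status field in
 * the shared buffer's header once the Quote is ready, so callers (such
 * as the TDX guest driver) are expected to poll or wait on that status.
 */
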
static void __noreturn tdx_panic(const char *msg)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = TDVMCALL_REPORT_FATAL_ERROR,
		.r12 = 0, /* Error code: 0 is Panic */
	};
	union {
		/* Define register order according to the GHCI */
		struct { u64 r14, r15, rbx, rdi, rsi, r8, r9, rdx; };

		char str[64];
	} message;

	/* VMM assumes '\0' in byte 65, if the message took all 64 bytes */
	strtomem_pad(message.str, msg, '\0');

	args.r8  = message.r8;
	args.r9  = message.r9;
	args.r14 = message.r14;
	args.r15 = message.r15;
	args.rdi = message.rdi;
	args.rsi = message.rsi;
	args.rbx = message.rbx;
	args.rdx = message.rdx;

	/*
	 * This hypercall should never return and it is not safe
	 * to keep the guest running. Call it forever if it
	 * happens to return.
	 */
	while (1)
		__tdx_hypercall(&args);
}

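/*
 * The union above packs the message bytes into the eight GPRs in GHCI
 * order: e.g. for the string "SEPT #VE" the first 8 bytes land in
 * message.r14, the next 8 in message.r15, and so on through
 * message.rdx, 64 bytes in total.
 */
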
static void tdx_parse_tdinfo(u64 *cc_mask)
{
	struct tdx_module_args args = {};
	unsigned int gpa_width;
	u64 td_attr;

	/*
	 * TDINFO TDX module call is used to get the TD execution environment
	 * information like GPA width, number of available vcpus, debug mode
	 * information, etc. More details about the ABI can be found in TDX
	 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL
	 * [TDG.VP.INFO].
	 */
	tdcall(TDG_VP_INFO, &args);

	/*
	 * The highest bit of a guest physical address is the "sharing" bit.
	 * Set it for shared pages and clear it for private pages.
	 *
	 * The GPA width that comes out of this call is critical. TDX guests
	 * can not meaningfully run without it.
	 */
	gpa_width = args.rcx & GENMASK(5, 0);
	*cc_mask = BIT_ULL(gpa_width - 1);

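	/*
	 * For example, with a 52-bit GPA width the shared bit is GPA
	 * bit 51 and cc_mask is BIT_ULL(51) == 0x0008000000000000.
	 */
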
	/*
	 * The kernel can not handle #VE's when accessing normal kernel
	 * memory.  Ensure that no #VE will be delivered for accesses to
	 * TD-private memory.  Only VMM-shared memory (MMIO) will #VE.
	 */
	td_attr = args.rdx;
	if (!(td_attr & ATTR_SEPT_VE_DISABLE)) {
		const char *msg = "TD misconfiguration: SEPT_VE_DISABLE attribute must be set.";

		/* Relax SEPT_VE_DISABLE check for debug TD. */
		if (td_attr & ATTR_DEBUG)
			pr_warn("%s\n", msg);
		else
			tdx_panic(msg);
	}
}

/*
 * The TDX module spec states that #VE may be injected for a limited set of
 * reasons:
 *
 *  - Emulation of the architectural #VE injection on EPT violation;
 *
 *  - As a result of guest TD execution of a disallowed instruction,
 *    a disallowed MSR access, or CPUID virtualization;
 *
 *  - A notification to the guest TD about anomalous behavior;
 *
 * The last one is opt-in and is not used by the kernel.
 *
 * The Intel Software Developer's Manual describes cases when instruction
 * length field can be used in section "Information for VM Exits Due to
 * Instruction Execution".
 *
 * For TDX, it ultimately means GET_VEINFO provides reliable instruction length
 * information if #VE occurred due to instruction execution, but not for EPT
 * violations.
 */
static int ve_instr_len(struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
	case EXIT_REASON_CPUID:
	case EXIT_REASON_IO_INSTRUCTION:
		/* It is safe to use ve->instr_len for #VE due to instructions */
		return ve->instr_len;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * For EPT violations, ve->instr_len is not defined. For those,
		 * the kernel must decode instructions manually and should not
		 * be using this function.
		 */
		WARN_ONCE(1, "ve->instr_len is not defined for EPT violations");
		return 0;
	default:
		WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason);
		return ve->instr_len;
	}
}

static u64 __cpuidle __halt(const bool irq_disabled)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_HLT),
		.r12 = irq_disabled,
	};

	/*
	 * Emulate HLT operation via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section 3.8 TDG.VP.VMCALL<Instruction.HLT>.
	 *
	 * The VMM uses the "IRQ disabled" param to understand IRQ
	 * enabled status (RFLAGS.IF) of the TD guest and to determine
	 * whether or not it should schedule the halted vCPU if an
	 * IRQ becomes pending. E.g. if IRQs are disabled, the VMM
	 * can keep the vCPU in virtual HLT, even if an IRQ is
	 * pending, without hanging/breaking the guest.
	 */
	return __tdx_hypercall(&args);
}

static int handle_halt(struct ve_info *ve)
{
	const bool irq_disabled = irqs_disabled();

	if (__halt(irq_disabled))
		return -EIO;

	return ve_instr_len(ve);
}

void __cpuidle tdx_safe_halt(void)
{
	const bool irq_disabled = false;

	/*
	 * Use WARN_ONCE() to report the failure.
	 */
	if (__halt(irq_disabled))
		WARN_ONCE(1, "HLT instruction emulation failed\n");
}

static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_READ),
		.r12 = regs->cx,
	};

	/*
	 * Emulate the MSR read via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "TDG.VP.VMCALL<Instruction.RDMSR>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	regs->ax = lower_32_bits(args.r11);
	regs->dx = upper_32_bits(args.r11);
	return ve_instr_len(ve);
}

static int write_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_WRITE),
		.r12 = regs->cx,
		.r13 = (u64)regs->dx << 32 | regs->ax,
	};

	/*
	 * Emulate the MSR write via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI) section titled "TDG.VP.VMCALL<Instruction.WRMSR>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	return ve_instr_len(ve);
}

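/*
 * MSR value packing above follows the RDMSR/WRMSR convention of
 * splitting the 64-bit value across EDX:EAX. E.g. a wrmsr with
 * regs->dx == 0x00000001 and regs->ax == 0x00000080 is forwarded to
 * the VMM as r13 == 0x0000000100000080, and a read result returned in
 * r11 is split back into ax/dx the same way.
 */
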
static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_CPUID),
		.r12 = regs->ax,
		.r13 = regs->cx,
	};

	/*
	 * Only allow VMM to control range reserved for hypervisor
	 * communication.
	 *
	 * Return all-zeros for any CPUID outside the range. It matches CPU
	 * behaviour for non-supported leaf.
	 */
	if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) {
		regs->ax = regs->bx = regs->cx = regs->dx = 0;
		return ve_instr_len(ve);
	}

	/*
	 * Emulate the CPUID instruction via a hypercall. More info about
	 * ABI can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "VP.VMCALL<Instruction.CPUID>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	/*
	 * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of
	 * EAX, EBX, ECX, EDX registers after the CPUID instruction execution.
	 * So copy the register contents back to pt_regs.
	 */
	regs->ax = args.r12;
	regs->bx = args.r13;
	regs->cx = args.r14;
	regs->dx = args.r15;

	return ve_instr_len(ve);
}

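/*
 * Illustration of the range check above (the specific leaves here are
 * assumptions, not mandated behavior): a #VE for CPUID(0x40000001),
 * inside the 0x40000000-0x4FFFFFFF hypervisor range, is forwarded to
 * the VMM, while a #VE for a leaf outside that range, say
 * CPUID(0x8000001f), simply returns zeros to the guest.
 */
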
static bool mmio_read(int size, unsigned long addr, unsigned long *val)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_EPT_VIOLATION),
		.r12 = size,
		.r13 = EPT_READ,
		.r14 = addr,
	};

	if (__tdx_hypercall(&args))
		return false;

	*val = args.r11;
	return true;
}

static bool mmio_write(int size, unsigned long addr, unsigned long val)
{
	return !_tdx_hypercall(hcall_func(EXIT_REASON_EPT_VIOLATION), size,
			       EPT_WRITE, addr, val);
}

static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
{
	unsigned long *reg, val, vaddr;
	char buffer[MAX_INSN_SIZE];
	enum insn_mmio_type mmio;
	struct insn insn = {};
	int size, extend_size;
	u8 extend_val = 0;

	/* Only in-kernel MMIO is supported */
	if (WARN_ON_ONCE(user_mode(regs)))
		return -EFAULT;

	if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE))
		return -EFAULT;

	if (insn_decode(&insn, buffer, MAX_INSN_SIZE, INSN_MODE_64))
		return -EINVAL;

	mmio = insn_decode_mmio(&insn, &size);
	if (WARN_ON_ONCE(mmio == INSN_MMIO_DECODE_FAILED))
		return -EINVAL;

	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
		reg = insn_get_modrm_reg_ptr(&insn, regs);
		if (!reg)
			return -EINVAL;
	}

	if (!fault_in_kernel_space(ve->gla)) {
		WARN_ONCE(1, "Access to userspace address is not supported");
		return -EINVAL;
	}

	/*
	 * Reject EPT violation #VEs that split pages.
	 *
	 * MMIO accesses are supposed to be naturally aligned and therefore
	 * never cross page boundaries. Seeing split page accesses indicates
	 * a bug or a load_unaligned_zeropad() that stepped into an MMIO page.
	 *
	 * load_unaligned_zeropad() will recover using exception fixups.
	 */
	vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
		return -EFAULT;

	/* Handle writes first */
	switch (mmio) {
	case INSN_MMIO_WRITE:
		memcpy(&val, reg, size);
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_WRITE_IMM:
		val = insn.immediate.value;
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_READ:
	case INSN_MMIO_READ_ZERO_EXTEND:
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Reads are handled below */
		break;
	case INSN_MMIO_MOVS:
	case INSN_MMIO_DECODE_FAILED:
		/*
		 * MMIO was accessed with an instruction that could not be
		 * decoded or handled properly. It was likely not using io.h
		 * helpers or accessed MMIO accidentally.
		 */
		return -EINVAL;
	default:
		WARN_ONCE(1, "Unknown insn_decode_mmio() decode value?");
		return -EINVAL;
	}

	/* Handle reads */
	if (!mmio_read(size, ve->gpa, &val))
		return -EIO;

	switch (mmio) {
	case INSN_MMIO_READ:
		/* Zero-extend for 32-bit operation */
		extend_size = size == 4 ? sizeof(*reg) : 0;
		break;
	case INSN_MMIO_READ_ZERO_EXTEND:
		/* Zero extend based on operand size */
		extend_size = insn.opnd_bytes;
		break;
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Sign extend based on operand size */
		extend_size = insn.opnd_bytes;
		if (size == 1 && val & BIT(7))
			extend_val = 0xFF;
		else if (size > 1 && val & BIT(15))
			extend_val = 0xFF;
		break;
	default:
		/* All other cases have to be covered with the first switch() */
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (extend_size)
		memset(reg, extend_val, extend_size);
	memcpy(reg, &val, size);
	return insn.length;
}

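/*
 * Sign-extension example for the read path above: a "movsbl" (MOVSX)
 * that reads the byte 0x80 from MMIO has size == 1 and
 * insn.opnd_bytes == 4, so extend_val becomes 0xFF. The destination's
 * low 4 bytes are first filled with 0xFF and the low byte is then
 * overwritten with 0x80, yielding 0xFFFFFF80 in the operand.
 */
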
static bool handle_in(struct pt_regs *regs, int size, int port)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
		.r12 = size,
		.r13 = PORT_READ,
		.r14 = port,
	};
	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);
	bool success;

	/*
	 * Emulate the I/O read via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	success = !__tdx_hypercall(&args);

	/* Update part of the register affected by the emulated instruction */
	regs->ax &= ~mask;
	if (success)
		regs->ax |= args.r11 & mask;

	return success;
}

static bool handle_out(struct pt_regs *regs, int size, int port)
{
	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);

	/*
	 * Emulate the I/O write via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	return !_tdx_hypercall(hcall_func(EXIT_REASON_IO_INSTRUCTION), size,
			       PORT_WRITE, port, regs->ax & mask);
}

/*
 * Emulate I/O using hypercall.
 *
 * Assumes the IO instruction was using ax, which is enforced
 * by the standard io.h macros.
 *
 * Return the instruction length on success or -errno on failure.
 */
static int handle_io(struct pt_regs *regs, struct ve_info *ve)
{
	u32 exit_qual = ve->exit_qual;
	int size, port;
	bool in, ret;

	if (VE_IS_IO_STRING(exit_qual))
		return -EIO;

	in   = VE_IS_IO_IN(exit_qual);
	size = VE_GET_IO_SIZE(exit_qual);
	port = VE_GET_PORT_NUM(exit_qual);

	if (in)
		ret = handle_in(regs, size, port);
	else
		ret = handle_out(regs, size, port);
	if (!ret)
		return -EIO;

	return ve_instr_len(ve);
}

/*
 * Early #VE exception handler. Only handles a subset of port I/O.
 * Intended only for earlyprintk. Returns false on failure.
 */
__init bool tdx_early_handle_ve(struct pt_regs *regs)
{
	struct ve_info ve;
	int insn_len;

	tdx_get_ve_info(&ve);

	if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION)
		return false;

	insn_len = handle_io(regs, &ve);
	if (insn_len < 0)
		return false;

	regs->ip += insn_len;
	return true;
}

void tdx_get_ve_info(struct ve_info *ve)
{
	struct tdx_module_args args = {};

	/*
	 * Called during #VE handling to retrieve the #VE info from the
	 * TDX module.
	 *
	 * This has to be called early in #VE handling.  A "nested" #VE which
	 * occurs before this will raise a #DF and is not recoverable.
	 *
	 * The call retrieves the #VE info from the TDX module, which also
	 * clears the "#VE valid" flag. This must be done before anything else
	 * because any #VE that occurs while the valid flag is set will lead to
	 * #DF.
	 *
	 * Note, the TDX module treats virtual NMIs as inhibited if the #VE
	 * valid flag is set. It means that NMI=>#VE will not result in a #DF.
	 */
	tdcall(TDG_VP_VEINFO_GET, &args);

	/* Transfer the output parameters */
	ve->exit_reason = args.rcx;
	ve->exit_qual   = args.rdx;
	ve->gla         = args.r8;
	ve->gpa         = args.r9;
	ve->instr_len   = lower_32_bits(args.r10);
	ve->instr_info  = upper_32_bits(args.r10);
}

/*
 * Handle the user-initiated #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

static inline bool is_private_gpa(u64 gpa)
{
	return gpa == cc_mkenc(gpa);
}

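/*
 * is_private_gpa() relies on cc_mkenc() being a no-op for a GPA whose
 * shared bit is already clear. E.g. with cc_mask == BIT_ULL(51), the
 * GPA 0x0008000000001000 (shared bit set) is shared, while
 * 0x0000000000001000 is private.
 */
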
/*
 * Handle the kernel #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
		return handle_halt(ve);
	case EXIT_REASON_MSR_READ:
		return read_msr(regs, ve);
	case EXIT_REASON_MSR_WRITE:
		return write_msr(regs, ve);
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	case EXIT_REASON_EPT_VIOLATION:
		if (is_private_gpa(ve->gpa))
			panic("Unexpected EPT-violation on private memory.");
		return handle_mmio(regs, ve);
	case EXIT_REASON_IO_INSTRUCTION:
		return handle_io(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve)
{
	int insn_len;

	if (user_mode(regs))
		insn_len = virt_exception_user(regs, ve);
	else
		insn_len = virt_exception_kernel(regs, ve);
	if (insn_len < 0)
		return false;

	/* After successful #VE handling, move the IP */
	regs->ip += insn_len;

	return true;
}

static bool tdx_tlb_flush_required(bool private)
{
	/*
	 * TDX guest is responsible for flushing TLB on private->shared
	 * transition. VMM is responsible for flushing on shared->private.
	 *
	 * The VMM _can't_ flush private addresses as it can't generate PAs
	 * with the guest's HKID.  Shared memory isn't subject to integrity
	 * checking, i.e. the VMM doesn't need to flush for its own protection.
	 *
	 * There's no need to flush when converting from shared to private,
	 * as flushing is the VMM's responsibility in this case, e.g. it must
	 * flush to avoid integrity failures in the face of a buggy or
	 * malicious guest.
	 */
	return !private;
}

static bool tdx_cache_flush_required(void)
{
	/*
	 * AMD SME/SEV can avoid cache flushing if HW enforces cache coherence.
	 * TDX doesn't have such capability.
	 *
	 * Flush cache unconditionally.
	 */
	return true;
}

/*
 * Notify the VMM about page mapping conversion. More info about ABI
 * can be found in TDX Guest-Host-Communication Interface (GHCI),
 * section "TDG.VP.VMCALL<MapGPA>".
 */
static bool tdx_map_gpa(phys_addr_t start, phys_addr_t end, bool enc)
{
	/* Retrying the hypercall a second time should succeed; use 3 just in case */
	const int max_retries_per_page = 3;
	int retry_count = 0;

	if (!enc) {
		/* Set the shared (decrypted) bits: */
		start |= cc_mkdec(0);
		end   |= cc_mkdec(0);
	}

	while (retry_count < max_retries_per_page) {
		struct tdx_module_args args = {
			.r10 = TDX_HYPERCALL_STANDARD,
			.r11 = TDVMCALL_MAP_GPA,
			.r12 = start,
			.r13 = end - start };

		u64 map_fail_paddr;
		u64 ret = __tdx_hypercall(&args);

		if (ret != TDVMCALL_STATUS_RETRY)
			return !ret;
		/*
		 * The guest must retry the operation for the pages in the
		 * region starting at the GPA specified in R11. R11 comes
		 * from the untrusted VMM. Sanity check it.
		 */
		map_fail_paddr = args.r11;
		if (map_fail_paddr < start || map_fail_paddr >= end)
			return false;

		/* "Consume" a retry without forward progress */
		if (map_fail_paddr == start) {
			retry_count++;
			continue;
		}

		start = map_fail_paddr;
		retry_count = 0;
	}

	return false;
}

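/*
 * Retry-loop example: when converting [0x1000000, 0x1800000) the VMM
 * returns TDVMCALL_STATUS_RETRY with r11 == 0x1400000. Forward progress
 * was made, so start moves to 0x1400000 and retry_count resets to 0.
 * If the VMM instead keeps returning r11 == start, the loop gives up
 * after max_retries_per_page (3) attempts and reports failure.
 */
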
/*
 * Inform the VMM of the guest's intent for this physical page: shared with
 * the VMM or private to the guest.  The VMM is expected to change its mapping
 * of the page in response.
 */
static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
{
	phys_addr_t start = __pa(vaddr);
	phys_addr_t end   = __pa(vaddr + numpages * PAGE_SIZE);

	if (!tdx_map_gpa(start, end, enc))
		return false;

	/* shared->private conversion requires memory to be accepted before use */
	if (enc)
		return tdx_accept_memory(start, end);

	return true;
}

static int tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
					 bool enc)
{
	/*
	 * Only handle shared->private conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (enc && !tdx_enc_status_changed(vaddr, numpages, enc))
		return -EIO;

	return 0;
}

static int tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
					 bool enc)
{
	/*
	 * Only handle private->shared conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (!enc && !tdx_enc_status_changed(vaddr, numpages, enc))
		return -EIO;

	if (enc)
		atomic_long_sub(numpages, &nr_shared);
	else
		atomic_long_add(numpages, &nr_shared);

	return 0;
}

/* Stop new private<->shared conversions */
static void tdx_kexec_begin(void)
{
	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
		return;

	/*
	 * Crash kernel reaches here with interrupts disabled: can't wait for
	 * conversions to finish.
	 *
	 * If a race happened, just report and proceed.
	 */
	if (!set_memory_enc_stop_conversion())
		pr_warn("Failed to stop shared<->private conversions\n");
}

/* Walk direct mapping and convert all shared memory back to private */
static void tdx_kexec_finish(void)
{
	unsigned long addr, end;
	long found = 0, shared;

	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
		return;

	lockdep_assert_irqs_disabled();

	addr = PAGE_OFFSET;
	end  = PAGE_OFFSET + get_max_mapped();

	while (addr < end) {
		unsigned long size;
		unsigned int level;
		pte_t *pte;

		pte = lookup_address(addr, &level);
		size = page_level_size(level);

		if (pte && pte_decrypted(*pte)) {
			int pages = size / PAGE_SIZE;

			/*
			 * Touching memory with shared bit set triggers implicit
			 * conversion to shared.
			 *
			 * Make sure nobody touches the shared range from
			 * now on.
			 */
			set_pte(pte, __pte(0));

			/*
			 * Memory encryption state persists across kexec.
			 * If tdx_enc_status_changed() fails in the first
			 * kernel, it leaves memory in an unknown state.
			 *
			 * If that memory remains shared, accessing it in the
			 * *next* kernel through a private mapping will result
			 * in an unrecoverable guest shutdown.
			 *
			 * The kdump kernel boot is not impacted as it uses
			 * a pre-reserved memory range that is always private.
			 * However, gathering crash information could lead to
			 * a crash if it accesses unconverted memory through
			 * a private mapping which is possible when accessing
			 * that memory through /proc/vmcore, for example.
			 *
			 * In all cases, print error info in order to leave
			 * enough bread crumbs for debugging.
			 */
			if (!tdx_enc_status_changed(addr, pages, true)) {
				pr_err("Failed to unshare range %#lx-%#lx\n",
				       addr, addr + size);
			}

			found += pages;
		}

		addr += size;
	}

	__flush_tlb_all();

	shared = atomic_long_read(&nr_shared);
	if (shared != found) {
		pr_err("shared page accounting is off\n");
		pr_err("nr_shared = %ld, nr_found = %ld\n", shared, found);
	}
}

void __init tdx_early_init(void)
{
	struct tdx_module_args args = {
		.rdx = TDCS_NOTIFY_ENABLES,
		.r9 = -1ULL,
	};
	u64 cc_mask;
	u32 eax, sig[3];

	cpuid_count(TDX_CPUID_LEAF_ID, 0, &eax, &sig[0], &sig[2], &sig[1]);

	if (memcmp(TDX_IDENT, sig, sizeof(sig)))
		return;

	setup_force_cpu_cap(X86_FEATURE_TDX_GUEST);

	/* TSC is the only reliable clock in TDX guest */
	setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);

	cc_vendor = CC_VENDOR_INTEL;
	tdx_parse_tdinfo(&cc_mask);
	cc_set_mask(cc_mask);

	/* Kernel does not use NOTIFY_ENABLES and does not need random #VEs */
	tdcall(TDG_VM_WR, &args);

	/*
	 * All bits above GPA width are reserved and kernel treats shared bit
	 * as flag, not as part of physical address.
	 *
	 * Adjust physical mask to only cover valid GPA bits.
	 */
	physical_mask &= cc_mask - 1;

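	/*
	 * Continuing the earlier example: with a 52-bit GPA width,
	 * cc_mask - 1 == GENMASK_ULL(50, 0), so the shared bit (51) and
	 * all reserved bits above it are stripped from physical_mask.
	 */
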
	/*
	 * The kernel mapping should match the TDX metadata for the page.
	 * load_unaligned_zeropad() can touch memory *adjacent* to that which is
	 * owned by the caller and can catch even _momentary_ mismatches.  Bad
	 * things happen on mismatch:
	 *
	 *   - Private mapping => Shared Page  == Guest shutdown
	 *   - Shared mapping  => Private Page == Recoverable #VE
	 *
	 * guest.enc_status_change_prepare() converts the page from
	 * shared=>private before the mapping becomes private.
	 *
	 * guest.enc_status_change_finish() converts the page from
	 * private=>shared after the mapping becomes shared.
	 *
	 * In both cases there is a temporary shared mapping to a private page,
	 * which can result in a #VE.  But, there is never a private mapping to
	 * a shared page.
	 */
	x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare;
	x86_platform.guest.enc_status_change_finish  = tdx_enc_status_change_finish;

	x86_platform.guest.enc_cache_flush_required  = tdx_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required    = tdx_tlb_flush_required;

	x86_platform.guest.enc_kexec_begin	     = tdx_kexec_begin;
	x86_platform.guest.enc_kexec_finish	     = tdx_kexec_finish;

	/*
	 * TDX intercepts the RDMSR to read the X2APIC ID in the parallel
	 * bringup low level code. That raises #VE which cannot be handled
	 * there.
	 *
	 * Intel-TDX has a secure RDMSR hypercall, but that needs to be
	 * implemented separately in the low level startup ASM code.
	 * Until that is in place, disable parallel bringup for TDX.
	 */
	x86_cpuinit.parallel_bringup = false;

	pr_info("Guest detected\n");
}
1005