1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * AMD Encrypted Register State Support
4  *
5  * Author: Joerg Roedel <jroedel@suse.de>
6  */
7 
8 #ifndef __ASM_ENCRYPTED_STATE_H
9 #define __ASM_ENCRYPTED_STATE_H
10 
11 #include <linux/types.h>
12 #include <asm/insn.h>
13 #include <asm/sev-common.h>
14 
/* GHCB protocol version spoken by this implementation */
#define GHCB_PROTO_OUR		0x0001UL
/* Highest GHCB protocol version supported */
#define GHCB_PROTOCOL_MAX	1ULL
/* Default value for the GHCB usage field */
#define GHCB_DEFAULT_USAGE	0ULL

/*
 * Exit to the hypervisor: "rep; vmmcall" is the VMGEXIT encoding
 * (the VMMCALL opcode with a REP prefix) used by SEV-ES guests.
 */
#define	VMGEXIT()			{ asm volatile("rep; vmmcall\n\r"); }
20 
/*
 * Result codes returned by the #VC instruction emulation paths.
 * Enumerators carry explicit values to make the implicit C numbering
 * visible; the order (and therefore each value) must not change.
 */
enum es_result {
	ES_OK		 = 0,	/* Operation completed successfully */
	ES_UNSUPPORTED	 = 1,	/* Requested operation is not supported */
	ES_VMM_ERROR	 = 2,	/* VMM responded with unexpected state */
	ES_DECODE_FAILED = 3,	/* Instruction could not be decoded */
	ES_EXCEPTION	 = 4,	/* Emulated instruction raised an exception */
	ES_RETRY	 = 5,	/* Instruction emulation should be retried */
};
29 
/*
 * Exception state captured when instruction emulation faults, so the
 * exception can be reported to the caller (see ES_EXCEPTION).
 */
struct es_fault_info {
	unsigned long vector;		/* Exception vector number */
	unsigned long error_code;	/* Exception error code */
	unsigned long cr2;		/* CR2 value — presumably the faulting
					 * address for page faults; confirm */
};
35 
struct pt_regs;		/* Forward declaration; defined elsewhere */

/* ES instruction emulation context */
struct es_em_ctxt {
	struct pt_regs *regs;		/* Register state of the faulting context */
	struct insn insn;		/* Decoded instruction being emulated */
	struct es_fault_info fi;	/* Filled in when emulation faults */
};
44 
/*
 * Handle a #VC exception in the early, pre-GHCB boot phase (name
 * suggests no GHCB is available yet — confirm against the definition).
 * @exit_code carries the intercept/exit code for the event.
 */
void do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code);
46 
/*
 * lower_bits() - Mask @val down to its @bits least significant bits.
 * @val:  Value to mask.
 * @bits: Number of low-order bits to keep.
 *
 * Returns @val with everything above the lowest @bits bits cleared.
 * For @bits >= 64 the value is returned unchanged: the previous
 * expression (1ULL << bits) - 1 shifted by the full type width in that
 * case, which is undefined behavior in C (C11 6.5.7p3).
 */
static inline u64 lower_bits(u64 val, unsigned int bits)
{
	u64 mask;

	if (bits >= 64)
		return val;

	mask = (1ULL << bits) - 1;

	return (val & mask);
}
53 
/* Opaque types referenced by the SEV-ES interfaces below */
struct real_mode_header;
enum stack_type;

/* Early IDT entry points for #VC handler */
extern void vc_no_ghcb(void);		/* Entry used before a GHCB exists */
extern void vc_boot_ghcb(void);		/* Entry using the boot GHCB */
extern bool handle_vc_boot_ghcb(struct pt_regs *regs);
61 
#ifdef CONFIG_AMD_MEM_ENCRYPT
/* Static key enabled when running as an SEV-ES guest */
extern struct static_key_false sev_es_enable_key;
extern void __sev_es_ist_enter(struct pt_regs *regs);
extern void __sev_es_ist_exit(void);
/*
 * Perform #VC IST stack adjustment on exception entry (per the
 * __sev_es_ist_enter() naming — confirm details at its definition).
 * Compiles to a no-op branch unless SEV-ES is active, via the static key.
 */
static __always_inline void sev_es_ist_enter(struct pt_regs *regs)
{
	if (static_branch_unlikely(&sev_es_enable_key))
		__sev_es_ist_enter(regs);
}
/* Undo the adjustment made by sev_es_ist_enter(); no-op without SEV-ES */
static __always_inline void sev_es_ist_exit(void)
{
	if (static_branch_unlikely(&sev_es_enable_key))
		__sev_es_ist_exit();
}
extern int sev_es_setup_ap_jump_table(struct real_mode_header *rmh);
extern void __sev_es_nmi_complete(void);
/* Signal NMI completion to the SEV-ES machinery; no-op without SEV-ES */
static __always_inline void sev_es_nmi_complete(void)
{
	if (static_branch_unlikely(&sev_es_enable_key))
		__sev_es_nmi_complete();
}
extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
#else	/* !CONFIG_AMD_MEM_ENCRYPT */
/* Stubs so callers need no #ifdefs when SEV-ES support is compiled out */
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
static inline void sev_es_ist_exit(void) { }
static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
static inline void sev_es_nmi_complete(void) { }
static inline int sev_es_efi_map_ghcbs(pgd_t *pgd) { return 0; }
#endif	/* CONFIG_AMD_MEM_ENCRYPT */
91 
92 #endif
93