/*	$OpenBSD: frameasm.h,v 1.27 2023/07/27 00:30:07 guenther Exp $	*/
/*	$NetBSD: frameasm.h,v 1.1 2003/04/26 18:39:40 fvdl Exp $	*/

#ifndef _AMD64_MACHINE_FRAMEASM_H
#define _AMD64_MACHINE_FRAMEASM_H

/*
 * Macros to define pushing/popping frames for interrupts, traps
 * and system calls.  The interrupt, trap, and syscall entry paths
 * have since diverged and each have their own entry macros below.
 */

/*
 * These are used on interrupt or trap entry or exit.
 */
#define INTR_SAVE_GPRS \
	subq	$120,%rsp		; \
	INTR_SAVE_MOST_GPRS_NO_ADJ	; \
	movq	%rcx,TF_RCX(%rsp)
#define INTR_SAVE_MOST_GPRS_NO_ADJ \
	movq	%r15,TF_R15(%rsp)	; \
	movq	%r14,TF_R14(%rsp)	; \
	movq	%r13,TF_R13(%rsp)	; \
	movq	%r12,TF_R12(%rsp)	; \
	movq	%r11,TF_R11(%rsp)	; \
	movq	%r10,TF_R10(%rsp)	; \
	movq	%r9,TF_R9(%rsp)		; \
	movq	%r8,TF_R8(%rsp)		; \
	movq	%rdi,TF_RDI(%rsp)	; \
	movq	%rsi,TF_RSI(%rsp)	; \
	movq	%rbp,TF_RBP(%rsp)	; \
	leaq	TF_RBP(%rsp),%rbp	; \
	movq	%rbx,TF_RBX(%rsp)	; \
	movq	%rdx,TF_RDX(%rsp)	; \
	movq	%rax,TF_RAX(%rsp)
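
/*
 * The leaq above makes %rbp point at the frame's saved-%rbp slot so
 * that frame-pointer tracebacks can link through trap frames; this
 * relies on tf_rbp being placed just below tf_rip in struct trapframe
 * (see <machine/frame.h>).  The 120 byte adjustment in INTR_SAVE_GPRS
 * covers the frame area below the trapno slot that holds the other
 * saved registers.
 */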

/*
 * We clear registers when coming from userspace to prevent
 * user-controlled values from being available for use in speculative
 * execution in the kernel.  %rsp and %rbp are the kernel values when
 * this is used, so there are only 14 to clear.  32-bit operations
 * clear the upper halves of the registers automatically.
 */
#define INTR_CLEAR_GPRS \
	xorl	%eax,%eax		; \
	xorl	%ebx,%ebx		; \
	xorl	%ecx,%ecx		; \
	xorl	%edx,%edx		; \
	xorl	%esi,%esi		; \
	xorl	%edi,%edi		; \
	xorl	%r8d,%r8d		; \
	xorl	%r9d,%r9d		; \
	xorl	%r10d,%r10d		; \
	xorl	%r11d,%r11d		; \
	xorl	%r12d,%r12d		; \
	xorl	%r13d,%r13d		; \
	xorl	%r14d,%r14d		; \
	xorl	%r15d,%r15d
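
/*
 * Note: the entry macros below invoke this only after the GPRs have
 * been written to the trap frame, so just the live register values
 * are scrubbed; the saved copies are restored on return to userspace.
 */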


/*
 * For real interrupt code paths, where we can come from userspace.
 * We only have an iretq_frame on entry.
 */
#define INTRENTRY_LABEL(label)	X##label##_untramp
#define	INTRENTRY(label) \
	endbr64				; \
	testb	$SEL_RPL,IRETQ_CS(%rsp)	; \
	je	INTRENTRY_LABEL(label)	; \
	swapgs				; \
	FENCE_SWAPGS_MIS_TAKEN		; \
	movq	%rax,CPUVAR(SCRATCH)	; \
	CODEPATCH_START			; \
	movq	CPUVAR(KERN_CR3),%rax	; \
	movq	%rax,%cr3		; \
	CODEPATCH_END(CPTAG_MELTDOWN_NOP);\
	jmp	98f			; \
END(X##label)				; \
_ENTRY(INTRENTRY_LABEL(label)) /* from kernel */ \
	FENCE_NO_SAFE_SMAP		; \
	subq	$TF_RIP,%rsp		; \
	movq	%rcx,TF_RCX(%rsp)	; \
	jmp	99f			; \
	_ALIGN_TRAPS			; \
98:	/* from userspace */		  \
	movq	CPUVAR(KERN_RSP),%rax	; \
	xchgq	%rax,%rsp		; \
	movq	%rcx,TF_RCX(%rsp)	; \
	RET_STACK_REFILL_WITH_RCX	; \
	/* copy iretq frame to the trap frame */ \
	movq	IRETQ_RIP(%rax),%rcx	; \
	movq	%rcx,TF_RIP(%rsp)	; \
	movq	IRETQ_CS(%rax),%rcx	; \
	movq	%rcx,TF_CS(%rsp)	; \
	movq	IRETQ_RFLAGS(%rax),%rcx	; \
	movq	%rcx,TF_RFLAGS(%rsp)	; \
	movq	IRETQ_RSP(%rax),%rcx	; \
	movq	%rcx,TF_RSP(%rsp)	; \
	movq	IRETQ_SS(%rax),%rcx	; \
	movq	%rcx,TF_SS(%rsp)	; \
	movq	CPUVAR(SCRATCH),%rax	; \
99:	INTR_SAVE_MOST_GPRS_NO_ADJ	; \
	INTR_CLEAR_GPRS			; \
	movq	%rax,TF_ERR(%rsp)
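
/*
 * When entered from userspace, this switches from the entry trampoline
 * stack to the per-CPU kernel stack (CPUVAR(KERN_RSP)) and, inside the
 * codepatch region, reloads %cr3 with the kernel page tables for the
 * Meltdown mitigation; on CPUs that don't need it, that region is
 * patched into NOPs at boot (CPTAG_MELTDOWN_NOP).  Entry from the
 * kernel only has to make room for the rest of the trap frame.
 * A hypothetical usage sketch (real consumers live in vector.S):
 *
 *	IDTVEC(myintr)			(defines Xmyintr)
 *		INTRENTRY(myintr)	(switch stacks, build trap frame)
 *		... service the interrupt ...
 *		INTRFASTEXIT
 */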

#define INTRFASTEXIT \
	jmp	intr_fast_exit

/*
 * Entry for faking up an interrupt frame after spllower() unblocks
 * a previously received interrupt.  On entry, %r13 has the %rip
 * to return to.  %r10 and %r11 are scratch.
 */
#define	INTR_RECURSE \
	endbr64				; \
	/* fake the iretq_frame */	; \
	movq	%rsp,%r10		; \
	movl	%ss,%r11d		; \
	pushq	%r11			; \
	pushq	%r10			; \
	pushfq				; \
	movl	%cs,%r11d		; \
	pushq	%r11			; \
	pushq	%r13			; \
	/* now do the rest of the intrframe */ \
	subq	$16,%rsp		; \
	INTR_SAVE_GPRS
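
/*
 * The 16 byte adjustment accounts for the two trap frame slots between
 * the iretq frame and the 120 bytes that INTR_SAVE_GPRS reserves (see
 * struct trapframe in <machine/frame.h>), leaving %rsp at the base of
 * a complete frame.
 */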


/*
 * Entry for traps from kernel, where there's a trapno + err already
 * on the stack.  We have to move the err from its hardware location
 * to the location we want it.
 */
#define	TRAP_ENTRY_KERN \
	subq	$120,%rsp		; \
	movq	%rcx,TF_RCX(%rsp)	; \
	movq	(TF_RIP - 8)(%rsp),%rcx	; \
	movq	%rcx,TF_ERR(%rsp)	; \
	INTR_SAVE_MOST_GPRS_NO_ADJ
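
/*
 * %rcx is saved first so it can serve as scratch: the CPU pushed the
 * error code immediately below the %rip slot, where (given the frame
 * layout in <machine/frame.h>) a saved register belongs, so it must be
 * copied out to tf_err before INTR_SAVE_MOST_GPRS_NO_ADJ overwrites
 * that slot.  After the subq, the stub-pushed trapno is already in
 * the tf_trapno slot.
 */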

/*
 * Entry for traps from userland, where there's a trapno + err on
 * the iretq stack.
 * Assumes that %rax has been saved in CPUVAR(SCRATCH).
 */
#define	TRAP_ENTRY_USER \
	movq	CPUVAR(KERN_RSP),%rax		; \
	xchgq	%rax,%rsp			; \
	movq	%rcx,TF_RCX(%rsp)		; \
	RET_STACK_REFILL_WITH_RCX		; \
	/* copy trapno+err to the trap frame */ \
	movq	0(%rax),%rcx			; \
	movq	%rcx,TF_TRAPNO(%rsp)		; \
	movq	8(%rax),%rcx			; \
	movq	%rcx,TF_ERR(%rsp)		; \
	/* copy iretq frame to the trap frame */ \
	movq	(IRETQ_RIP+16)(%rax),%rcx	; \
	movq	%rcx,TF_RIP(%rsp)		; \
	movq	(IRETQ_CS+16)(%rax),%rcx	; \
	movq	%rcx,TF_CS(%rsp)		; \
	movq	(IRETQ_RFLAGS+16)(%rax),%rcx	; \
	movq	%rcx,TF_RFLAGS(%rsp)		; \
	movq	(IRETQ_RSP+16)(%rax),%rcx	; \
	movq	%rcx,TF_RSP(%rsp)		; \
	movq	(IRETQ_SS+16)(%rax),%rcx	; \
	movq	%rcx,TF_SS(%rsp)		; \
	movq	CPUVAR(SCRATCH),%rax		; \
	INTR_SAVE_MOST_GPRS_NO_ADJ		; \
	INTR_CLEAR_GPRS
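
/*
 * The +16 offsets arise because the trampoline stack holds trapno and
 * err in its first two words, so the iretq frame members sit 16 bytes
 * above their usual IRETQ_* positions.  As in INTRENTRY, %rax holds
 * the old stack pointer after the xchgq until it is restored from
 * CPUVAR(SCRATCH).
 */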

/*
 * Entry from syscall instruction, where RIP is in %rcx and RFLAGS is in %r11.
 * We stash the syscall # in tf_err for SPL check.
 * Assumes that %rax has been saved in CPUVAR(SCRATCH).
 */
#define	SYSCALL_ENTRY \
	movq	CPUVAR(KERN_RSP),%rax				; \
	xchgq	%rax,%rsp					; \
	movq	%rcx,TF_RCX(%rsp)				; \
	movq	%rcx,TF_RIP(%rsp)				; \
	RET_STACK_REFILL_WITH_RCX				; \
	movq	$(GSEL(GUDATA_SEL, SEL_UPL)),TF_SS(%rsp)	; \
	movq	%rax,TF_RSP(%rsp)				; \
	movq	CPUVAR(SCRATCH),%rax				; \
	INTR_SAVE_MOST_GPRS_NO_ADJ				; \
	movq	%r11, TF_RFLAGS(%rsp)				; \
	movq	$(GSEL(GUCODE_SEL, SEL_UPL)), TF_CS(%rsp)	; \
	movq	%rax,TF_ERR(%rsp)				; \
	INTR_CLEAR_GPRS
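
/*
 * The syscall instruction itself moved the user %rip into %rcx and
 * %rflags into %r11, but loads %cs/%ss from the STAR MSR instead of
 * pushing the old values, so constant GUCODE/GUDATA selectors are
 * stored here.  The syscall number (%rax, recovered from
 * CPUVAR(SCRATCH)) is stashed in tf_err before INTR_CLEAR_GPRS zeroes
 * the live registers.
 */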

#define CHECK_ASTPENDING(reg)	movq	CPUVAR(CURPROC),reg		; \
				cmpq	$0, reg				; \
				je	99f				; \
				cmpl	$0, P_MD_ASTPENDING(reg)	; \
				99:
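
/*
 * CHECK_ASTPENDING leaves ZF set when there is no curproc or no AST
 * is pending, so the caller can branch on it directly; a hypothetical
 * sketch (real callers are in the trap/syscall return paths):
 *
 *	CHECK_ASTPENDING(%r13)
 *	jz	1f
 *	CLEAR_ASTPENDING(%r13)
 *	(deliver the AST)
 * 1:
 */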

#define CLEAR_ASTPENDING(reg)	movl	$0, P_MD_ASTPENDING(reg)

#endif /* _AMD64_MACHINE_FRAMEASM_H */