xref: /openbsd/sys/arch/amd64/include/frameasm.h (revision 097a140d)
1 /*	$OpenBSD: frameasm.h,v 1.25 2020/11/12 23:29:16 guenther Exp $	*/
2 /*	$NetBSD: frameasm.h,v 1.1 2003/04/26 18:39:40 fvdl Exp $	*/
3 
4 #ifndef _AMD64_MACHINE_FRAMEASM_H
5 #define _AMD64_MACHINE_FRAMEASM_H
6 
7 /*
8  * Macros to define pushing/popping frames for interrupts, traps
9  * and system calls. Currently all the same; will diverge later.
10  */
11 
12 /*
13  * These are used on interrupt or trap entry or exit.
14  */
/*
 * INTR_SAVE_GPRS: carve out the general-register area of the trap frame
 * below the current %rsp and save every GPR into it, %rcx included.
 * NOTE(review): the $120 presumably equals TF_RIP, i.e. the size of the
 * register-save area below the hardware iretq frame (TRAP_ENTRY_KERN
 * uses the same constant; INTRENTRY's kernel path uses $TF_RIP) --
 * confirm against the trapframe layout used to generate the TF_* offsets.
 */
15 #define INTR_SAVE_GPRS \
16 	subq	$120,%rsp		; \
17 	INTR_SAVE_MOST_GPRS_NO_ADJ	; \
18 	movq	%rcx,TF_RCX(%rsp)
/*
 * INTR_SAVE_MOST_GPRS_NO_ADJ: save all general-purpose registers except
 * %rcx into the trap frame at %rsp.  The caller must have adjusted %rsp
 * to the frame base already ("NO_ADJ") and must save %rcx itself --
 * callers need %rcx free early (e.g. for RET_STACK_REFILL_WITH_RCX or
 * because the syscall instruction put the user %rip there).
 */
19 #define INTR_SAVE_MOST_GPRS_NO_ADJ \
20 	movq	%r15,TF_R15(%rsp)	; \
21 	movq	%r14,TF_R14(%rsp)	; \
22 	movq	%r13,TF_R13(%rsp)	; \
23 	movq	%r12,TF_R12(%rsp)	; \
24 	movq	%r11,TF_R11(%rsp)	; \
25 	movq	%r10,TF_R10(%rsp)	; \
26 	movq	%r9,TF_R9(%rsp)		; \
27 	movq	%r8,TF_R8(%rsp)		; \
28 	movq	%rdi,TF_RDI(%rsp)	; \
29 	movq	%rsi,TF_RSI(%rsp)	; \
30 	movq	%rbp,TF_RBP(%rsp)	; \
31 	leaq	TF_RBP(%rsp),%rbp	; /* point %rbp at the saved %rbp: links the frame-pointer chain through the trap frame */ \
32 	movq	%rbx,TF_RBX(%rsp)	; \
33 	movq	%rdx,TF_RDX(%rsp)	; \
34 	movq	%rax,TF_RAX(%rsp)
35 
36 /*
37  * We clear registers when coming from userspace to prevent
38  * user-controlled values from being available for use in speculative
39  * execution in the kernel.  %rsp and %rbp are the kernel values when
40  * this is used, so there are only 14 to clear.  32bit operations clear
41  * the register upper-halves automatically.
42  */
/*
 * INTR_CLEAR_GPRS: zero the 14 GPRs other than %rsp/%rbp (rationale in
 * the comment above).  Note the xorl forms also leave %rax == 0, which
 * INTRENTRY relies on afterwards to zero tf_err.
 */
43 #define INTR_CLEAR_GPRS \
44 	xorl	%eax,%eax		; \
45 	xorl	%ebx,%ebx		; \
46 	xorl	%ecx,%ecx		; \
47 	xorl	%edx,%edx		; \
48 	xorl	%esi,%esi		; \
49 	xorl	%edi,%edi		; \
50 	xorl	%r8d,%r8d		; \
51 	xorl	%r9d,%r9d		; \
52 	xorl	%r10d,%r10d		; \
53 	xorl	%r11d,%r11d		; \
54 	xorl	%r12d,%r12d		; \
55 	xorl	%r13d,%r13d		; \
56 	xorl	%r14d,%r14d		; \
57 	xorl	%r15d,%r15d
58 
59 
60 /*
61  * For real interrupt code paths, where we can come from userspace.
62  * We only have an iretq_frame on entry.
63  */
/*
 * INTRENTRY(label): common trap/interrupt entry.  The RPL bits of the
 * saved %cs distinguish kernel from user entry:
 *  - from kernel (RPL 0): just carve out the rest of the trap frame
 *    below the hardware iretq frame and save registers;
 *  - from user: swapgs to the kernel GSBASE, switch to the kernel page
 *    tables (the CR3 load is codepatched to NOPs, CPTAG_MELTDOWN_NOP,
 *    on CPUs that need no Meltdown mitigation), move off the trampoline
 *    stack onto the per-CPU kernel stack, and copy the iretq frame
 *    across by hand.
 * Both paths merge at 99: to save the remaining GPRs.
 */
64 #define INTRENTRY_LABEL(label)	X##label##_untramp
65 #define	INTRENTRY(label) \
66 	testb	$SEL_RPL,IRETQ_CS(%rsp)	; \
67 	je	INTRENTRY_LABEL(label)	; /* RPL == 0: trap came from kernel mode */ \
68 	swapgs				; \
69 	FENCE_SWAPGS_MIS_TAKEN 		; \
70 	movq	%rax,CPUVAR(SCRATCH)	; \
71 	CODEPATCH_START			; \
72 	movq	CPUVAR(KERN_CR3),%rax	; \
73 	movq	%rax,%cr3		; /* enter the full kernel address space (Meltdown/KPTI) */ \
74 	CODEPATCH_END(CPTAG_MELTDOWN_NOP);\
75 	jmp	98f			; \
76 END(X##label)				; \
77 _ENTRY(INTRENTRY_LABEL(label)) /* from kernel */ \
78 	FENCE_NO_SAFE_SMAP		; \
79 	subq	$TF_RIP,%rsp		; /* hw pushed the iretq frame; make room for the rest */ \
80 	movq	%rcx,TF_RCX(%rsp)	; \
81 	jmp	99f			; \
82 	_ALIGN_TRAPS			; \
83 98:	/* from userspace */		  \
84 	movq	CPUVAR(KERN_RSP),%rax	; \
85 	xchgq	%rax,%rsp		; /* %rsp = kernel stack; %rax -> iretq frame on old stack */ \
86 	movq	%rcx,TF_RCX(%rsp)	; \
87 	RET_STACK_REFILL_WITH_RCX	; \
88 	/* copy iretq frame to the trap frame */ \
89 	movq	IRETQ_RIP(%rax),%rcx	; \
90 	movq	%rcx,TF_RIP(%rsp)	; \
91 	movq	IRETQ_CS(%rax),%rcx	; \
92 	movq	%rcx,TF_CS(%rsp)	; \
93 	movq	IRETQ_RFLAGS(%rax),%rcx	; \
94 	movq	%rcx,TF_RFLAGS(%rsp)	; \
95 	movq	IRETQ_RSP(%rax),%rcx	; \
96 	movq	%rcx,TF_RSP(%rsp)	; \
97 	movq	IRETQ_SS(%rax),%rcx	; \
98 	movq	%rcx,TF_SS(%rsp)	; \
99 	movq	CPUVAR(SCRATCH),%rax	; \
100 99:	INTR_SAVE_MOST_GPRS_NO_ADJ	; \
101 	INTR_CLEAR_GPRS			; \
102 	movq	%rax,TF_ERR(%rsp)	/* %rax == 0 after INTR_CLEAR_GPRS: zeroes tf_err */
103 
/* Leave via the common interrupt-return path (register restore + iretq). */
104 #define INTRFASTEXIT \
105 	jmp	intr_fast_exit
106 
107 /*
108  * Entry for faking up an interrupt frame after spllower() unblocks
109  * a previously received interrupt.  On entry, %r13 has the %rip
110  * to return to.  %r10 and %r11 are scratch.
111  */
112 #define	INTR_RECURSE \
113 	/* fake the iretq_frame */	; \
114 	movq	%rsp,%r10		; /* current %rsp becomes the frame's saved %rsp */ \
115 	movl	%ss,%r11d		; \
116 	pushq	%r11			; /* ss */ \
117 	pushq	%r10			; /* rsp */ \
118 	pushfq				; /* rflags */ \
119 	movl	%cs,%r11d		; \
120 	pushq	%r11			; /* cs */ \
121 	pushq	%r13			; /* rip to return to (caller contract) */ \
122 	/* now do the rest of the intrframe */ \
123 	subq	$16,%rsp		; /* presumably the tf_trapno/tf_err slots -- confirm offsets */ \
124 	INTR_SAVE_GPRS
125 
126 
127 /*
128  * Entry for traps from kernel, where there's a trapno + err already
129  * on the stack.  We have to move the err from its hardware location
130  * to the location we want it.
131  */
132 #define	TRAP_ENTRY_KERN \
133 	subq	$120,%rsp		; /* room for the GPR area; see INTR_SAVE_GPRS */ \
134 	movq	%rcx,TF_RCX(%rsp)	; \
135 	movq	(TF_RIP - 8)(%rsp),%rcx	; /* err code as hw/stub left it, just below tf_rip */ \
136 	movq	%rcx,TF_ERR(%rsp)	; /* ... moved into the frame's tf_err slot */ \
137 	INTR_SAVE_MOST_GPRS_NO_ADJ
138 
139 /*
140  * Entry for traps from userland, where there's a trapno + err on
141  * the iretq stack.
142  * Assumes that %rax has been saved in CPUVAR(SCRATCH).
143  */
144 #define	TRAP_ENTRY_USER \
145 	movq	CPUVAR(KERN_RSP),%rax		; \
146 	xchgq	%rax,%rsp			; /* %rsp = kernel stack; %rax -> trapno+err+iretq on old stack */ \
147 	movq	%rcx,TF_RCX(%rsp)		; \
148 	RET_STACK_REFILL_WITH_RCX		; \
149 	/* copy trapno+err to the trap frame */ \
150 	movq	0(%rax),%rcx			; /* trapno */ \
151 	movq	%rcx,TF_TRAPNO(%rsp)		; \
152 	movq	8(%rax),%rcx			; /* err */ \
153 	movq	%rcx,TF_ERR(%rsp)		; \
154 	/* copy iretq frame to the trap frame; +16 skips the trapno+err pair */ \
155 	movq	(IRETQ_RIP+16)(%rax),%rcx	; \
156 	movq	%rcx,TF_RIP(%rsp)		; \
157 	movq	(IRETQ_CS+16)(%rax),%rcx	; \
158 	movq	%rcx,TF_CS(%rsp)		; \
159 	movq	(IRETQ_RFLAGS+16)(%rax),%rcx	; \
160 	movq	%rcx,TF_RFLAGS(%rsp)		; \
161 	movq	(IRETQ_RSP+16)(%rax),%rcx	; \
162 	movq	%rcx,TF_RSP(%rsp)		; \
163 	movq	(IRETQ_SS+16)(%rax),%rcx	; \
164 	movq	%rcx,TF_SS(%rsp)		; \
165 	movq	CPUVAR(SCRATCH),%rax		; /* recover the user %rax */ \
166 	INTR_SAVE_MOST_GPRS_NO_ADJ		; \
167 	INTR_CLEAR_GPRS
168 
169 /*
170  * Entry from syscall instruction, where RIP is in %rcx and RFLAGS is in %r11.
171  * We stash the syscall # in tf_err for SPL check.
172  * Assumes that %rax has been saved in CPUVAR(SCRATCH).
173  */
174 #define	SYSCALL_ENTRY \
175 	movq	CPUVAR(KERN_RSP),%rax				; \
176 	xchgq	%rax,%rsp					; /* %rsp = kernel stack; %rax = user %rsp */ \
177 	movq	%rcx,TF_RCX(%rsp)				; /* syscall put the user %rip in %rcx... */ \
178 	movq	%rcx,TF_RIP(%rsp)				; /* ...so it is both tf_rcx and tf_rip */ \
179 	RET_STACK_REFILL_WITH_RCX				; \
180 	movq	$(GSEL(GUDATA_SEL, SEL_UPL)),TF_SS(%rsp)	; \
181 	movq	%rax,TF_RSP(%rsp)				; /* user %rsp from the xchgq above */ \
182 	movq	CPUVAR(SCRATCH),%rax				; /* recover user %rax = syscall # */ \
183 	INTR_SAVE_MOST_GPRS_NO_ADJ				; \
184 	movq	%r11, TF_RFLAGS(%rsp)				; /* syscall put rflags in %r11 */ \
185 	movq	$(GSEL(GUCODE_SEL, SEL_UPL)), TF_CS(%rsp)	; \
186 	movq	%rax,TF_ERR(%rsp)				; /* stash syscall # before %rax is cleared */ \
187 	INTR_CLEAR_GPRS
188 
/*
 * CHECK_ASTPENDING(reg): load curproc into reg and leave ZF clear iff
 * it exists and has an AST pending.  When curproc is NULL, the cmpq
 * leaves ZF set before the jump, so the caller's "je" reads it as
 * "no AST pending".
 */
189 #define CHECK_ASTPENDING(reg)	movq	CPUVAR(CURPROC),reg		; \
190 				cmpq	$0, reg				; \
191 				je	99f				; \
192 				cmpl	$0, P_MD_ASTPENDING(reg)	; \
193 				99:
194 
/* CLEAR_ASTPENDING(reg): reg must still hold curproc (per CHECK_ASTPENDING). */
195 #define CLEAR_ASTPENDING(reg)	movl	$0, P_MD_ASTPENDING(reg)
196 
197 #endif /* _AMD64_MACHINE_FRAMEASM_H */
198