/*	$NetBSD: amd64_trap.S,v 1.3 2015/11/22 13:41:24 maxv Exp $	*/

/*-
 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#if 0
#include <machine/asm.h>
__KERNEL_RCSID(0, "$NetBSD: amd64_trap.S,v 1.3 2015/11/22 13:41:24 maxv Exp $");
#endif
/*
 * Trap and fault vector routines
 *
 * On exit from the kernel to user mode, we always need to check for ASTs.  In
 * addition, we need to do this atomically; otherwise an interrupt may occur
 * which causes an AST, but it won't get processed until the next kernel entry
 * (possibly the next clock tick).  Thus, we disable interrupts before checking,
 * and only enable them again on the final `iret' or before calling the AST
 * handler.
 */

/*****************************************************************************/

#ifdef	XEN
#define	PRE_TRAP	movq (%rsp),%rcx ; movq 8(%rsp),%r11 ; addq $0x10,%rsp
#else
#define	PRE_TRAP
#endif

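/*
 * TRAP()/ZTRAP() build the entry stub for a vector: ZTRAP pushes a dummy
 * error code of zero for exceptions where the CPU does not supply one,
 * while TRAP is used where the CPU has already pushed an error code.
 * Both push the trap number and jump to alltraps; the _NJ ("no jump")
 * variants omit the jump so a vector can insert code of its own first.
 */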
#define	TRAP_NJ(a)	PRE_TRAP ; pushq $(a)
#define	ZTRAP_NJ(a)	PRE_TRAP ; pushq $0 ; pushq $(a)
#define	TRAP(a)		TRAP_NJ(a) ; jmp _C_LABEL(alltraps)
#define	ZTRAP(a)	ZTRAP_NJ(a) ; jmp _C_LABEL(alltraps)

	.text

IDTVEC(trap00)
	ZTRAP(T_DIVIDE)
IDTVEC_END(trap00)

IDTVEC(trap01)
	ZTRAP(T_TRCTRAP)
IDTVEC_END(trap01)

IDTVEC(trap02)
#if defined(XEN)
	ZTRAP(T_NMI)
#else /* defined(XEN) */
	pushq $0
	pushq $T_NMI
	subq	$TF_REGSIZE,%rsp
	INTR_SAVE_GPRS
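	/*
	 * An NMI can arrive while the kernel is still running with the
	 * user %gs base (just before the entry swapgs or just after the
	 * exit one), so we cannot swap unconditionally.  Read MSR_GSBASE
	 * and only swapgs if the current base is not a kernel address.
	 */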
	movl	$MSR_GSBASE,%ecx
	rdmsr
	cmpl	$VM_MIN_KERNEL_ADDRESS_HIGH32,%edx
	jae	1f
	swapgs
	movw	%gs,TF_GS(%rsp)
	movw	%fs,TF_FS(%rsp)
	movw	%es,TF_ES(%rsp)
	movw	%ds,TF_DS(%rsp)
	movq	%rsp,%rdi
	incq	CPUVAR(NTRAP)
	call	_C_LABEL(trap)
	movw	TF_ES(%rsp),%es
	movw	TF_DS(%rsp),%ds
	swapgs
	jmp	2f
1:
	movq	%rsp,%rdi
	incq	CPUVAR(NTRAP)
	call	_C_LABEL(trap)
2:
	INTR_RESTORE_GPRS
	addq	$TF_REGSIZE+16,%rsp
	iretq
#endif /* defined(XEN) */
IDTVEC_END(trap02)

IDTVEC(trap03)
#ifndef KDTRACE_HOOKS
	ZTRAP(T_BPTFLT)
#else
	ZTRAP_NJ(T_BPTFLT)
	INTRENTRY
	STI(si)
	/*
	 * DTrace Function Boundary Trace (fbt) probes are triggered
	 * by int3 (0xcc).
	 */
	/* If no DTrace hook is registered, take the normal trap path. */
	cmpq	$0,dtrace_invop_jump_addr
	je	calltrap

	/*
	 * Set the address the hook jumps back to in the event that
	 * the exception wasn't caused by DTrace at all.
	 */
	/*
	 * XXX: This doesn't look right for SMP - unless it is a
	 * constant - so why set it every time? (dsl)
	 */
	movq	$calltrap, dtrace_invop_calltrap_addr(%rip)

	/* Jump to the code hooked in by DTrace. */
	movq	dtrace_invop_jump_addr, %rax
	jmpq	*dtrace_invop_jump_addr

	.bss
	.globl	dtrace_invop_jump_addr
	.align	8
	.type	dtrace_invop_jump_addr, @object
	.size	dtrace_invop_jump_addr, 8
dtrace_invop_jump_addr:
	.zero	8
	.globl	dtrace_invop_calltrap_addr
	.align	8
	.type	dtrace_invop_calltrap_addr, @object
	.size	dtrace_invop_calltrap_addr, 8
dtrace_invop_calltrap_addr:
	.zero	8
	.text
#endif
IDTVEC_END(trap03)

IDTVEC(trap04)
	ZTRAP(T_OFLOW)
IDTVEC_END(trap04)

IDTVEC(trap05)
	ZTRAP(T_BOUND)
IDTVEC_END(trap05)

IDTVEC(trap06)
	ZTRAP(T_PRIVINFLT)
IDTVEC_END(trap06)

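/*
 * #NM, device not available: fpudna() supplies the lwp's FPU context
 * (lazy FPU handling), so this vector bypasses the generic trap() call.
 */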
IDTVEC(trap07)
	ZTRAP_NJ(T_DNA)
	INTRENTRY
#ifdef DIAGNOSTIC
	movl	CPUVAR(ILEVEL),%ebx
#endif /* DIAGNOSTIC */
	movq	%rsp,%rdi
	call	_C_LABEL(fpudna)
	jmp	.Lalltraps_checkusr
IDTVEC_END(trap07)

IDTVEC(trap08)
	TRAP(T_DOUBLEFLT)
IDTVEC_END(trap08)

IDTVEC(trap09)
	ZTRAP(T_FPOPFLT)
IDTVEC_END(trap09)

IDTVEC(trap0a)
	TRAP(T_TSSFLT)
IDTVEC_END(trap0a)

#ifdef XEN
/*
 * I don't believe XEN generates in-kernel traps for the equivalent of
 * iret; if it does, this code would be needed in order to copy the
 * user segment registers into the fault frame.
 */
#define check_swapgs alltraps
#endif

IDTVEC(trap0b)		/* #NP() Segment not present */
	TRAP_NJ(T_SEGNPFLT)
	jmp	check_swapgs
IDTVEC_END(trap0b)		/* #NP() Segment not present */

IDTVEC(trap0c)		/* #SS() Stack exception */
	TRAP_NJ(T_STKFLT)
	jmp	check_swapgs
IDTVEC_END(trap0c)		/* #SS() Stack exception */

IDTVEC(trap0d)		/* #GP() General protection */
	TRAP_NJ(T_PROTFLT)
#ifdef check_swapgs
	jmp	check_swapgs
#else
/*
 * We need to worry about traps while the kernel %gs_base isn't loaded.
 * These are either loads to %gs (32-bit only) or faults on iret during
 * return to user.
 */
check_swapgs:
	INTRENTRY_L(3f,1:)
2:	sti
	jmp	calltrap
3:
	/* Trap in kernel mode. */
	/* If the faulting instruction is 'iret' we may need to do a 'swapgs'. */
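	/*
	 * 'iretq' is encoded 0x48 0xcf, which reads back as the
	 * little-endian word 0xcf48.
	 */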
	movq	TF_RIP(%rsp),%rax
	cmpw	$0xcf48,(%rax)		/* Is the faulting instruction iretq? */
	jne	5f			/* Jump if not */
	movq	TF_RSP(%rsp),%rax	/* Must read %rsp, may be a pad word */
	testb	$SEL_UPL,8(%rax)	/* Check %cs of outer iret frame */
	je	2b			/* Jump if iret was to kernel */
	jmp	1b			/* To user - must restore %gs */
5:
	/* Not 'iret'; any move to %gs also needs a swapgs. */
	movw	(%rax),%ax
	andb	$070,%ah		/* Mask out mod and r/m, keep reg */
	cmpw	$0x8e+050*256,%ax	/* Any move to %gs (reg 5)? */
	jne	2b			/* No - normal kernel fault */
	jmp	1b			/* Yes - restore %gs */
#endif
IDTVEC_END(trap0d)

IDTVEC(trap0e)
	TRAP(T_PAGEFLT)
IDTVEC_END(trap0e)

IDTVEC(intrspurious)
IDTVEC(trap0f)
	ZTRAP_NJ(T_ASTFLT)
	INTRENTRY
#ifdef DIAGNOSTIC
	movl	CPUVAR(ILEVEL),%ebx
#endif /* DIAGNOSTIC */
	jmp	.Lalltraps_checkusr
IDTVEC_END(trap0f)
IDTVEC_END(intrspurious)

IDTVEC(trap10)
	ZTRAP_NJ(T_ARITHTRAP)
.Ldo_fputrap:
	INTRENTRY
#ifdef DIAGNOSTIC
	movl	CPUVAR(ILEVEL),%ebx
#endif /* DIAGNOSTIC */
	movq	%rsp,%rdi
	call	_C_LABEL(fputrap)
	jmp	.Lalltraps_checkusr
IDTVEC_END(trap10)

IDTVEC(trap11)
	TRAP(T_ALIGNFLT)
IDTVEC_END(trap11)

IDTVEC(trap12)
	ZTRAP(T_MCA)
IDTVEC_END(trap12)

IDTVEC(trap13)
	ZTRAP_NJ(T_XMM)
	jmp	.Ldo_fputrap
IDTVEC_END(trap13)

IDTVEC(trap14)
IDTVEC(trap15)
IDTVEC(trap16)
IDTVEC(trap17)
IDTVEC(trap18)
IDTVEC(trap19)
IDTVEC(trap1a)
IDTVEC(trap1b)
IDTVEC(trap1c)
IDTVEC(trap1d)
IDTVEC(trap1e)
IDTVEC(trap1f)
	/* Vectors 20 - 31 are reserved for future expansion. */
	ZTRAP(T_RESERVED)
IDTVEC_END(trap1f)
IDTVEC_END(trap1e)
IDTVEC_END(trap1d)
IDTVEC_END(trap1c)
IDTVEC_END(trap1b)
IDTVEC_END(trap1a)
IDTVEC_END(trap19)
IDTVEC_END(trap18)
IDTVEC_END(trap17)
IDTVEC_END(trap16)
IDTVEC_END(trap15)
IDTVEC_END(trap14)

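/*
 * Table of the exception entry points above, one per vector 0x00-0x1f,
 * used by the machine-dependent startup code to fill in the low IDT
 * entries.
 */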
IDTVEC(exceptions)
	.quad	_C_LABEL(Xtrap00), _C_LABEL(Xtrap01)
	.quad	_C_LABEL(Xtrap02), _C_LABEL(Xtrap03)
	.quad	_C_LABEL(Xtrap04), _C_LABEL(Xtrap05)
	.quad	_C_LABEL(Xtrap06), _C_LABEL(Xtrap07)
	.quad	_C_LABEL(Xtrap08), _C_LABEL(Xtrap09)
	.quad	_C_LABEL(Xtrap0a), _C_LABEL(Xtrap0b)
	.quad	_C_LABEL(Xtrap0c), _C_LABEL(Xtrap0d)
	.quad	_C_LABEL(Xtrap0e), _C_LABEL(Xtrap0f)
	.quad	_C_LABEL(Xtrap10), _C_LABEL(Xtrap11)
	.quad	_C_LABEL(Xtrap12), _C_LABEL(Xtrap13)
	.quad	_C_LABEL(Xtrap14), _C_LABEL(Xtrap15)
	.quad	_C_LABEL(Xtrap16), _C_LABEL(Xtrap17)
	.quad	_C_LABEL(Xtrap18), _C_LABEL(Xtrap19)
	.quad	_C_LABEL(Xtrap1a), _C_LABEL(Xtrap1b)
	.quad	_C_LABEL(Xtrap1c), _C_LABEL(Xtrap1d)
	.quad	_C_LABEL(Xtrap1e), _C_LABEL(Xtrap1f)
IDTVEC_END(exceptions)

/*
 * trap() calls here when it detects a fault in INTRFASTEXIT (loading the
 * segment registers or during the iret itself).
 * The address of the (possibly reconstructed) user trap frame is
 * passed as an argument.
 * Typically the code will have raised a SIGSEGV which will be actioned
 * by the code below.
 */
	.type	_C_LABEL(trap_return_fault_return), @function
LABEL(trap_return_fault_return)
	mov	%rdi,%rsp		/* frame for user return */
#ifdef DIAGNOSTIC
	/* We can't recover the saved %rbx, so suppress warning */
	movl	CPUVAR(ILEVEL),%ebx
#endif /* DIAGNOSTIC */
	jmp	.Lalltraps_checkusr
END(trap_return_fault_return)

/*
 * All traps go through here. Call the generic trap handler, and
 * check for ASTs afterwards.
 */
NENTRY(alltraps)
	INTRENTRY
	STI(si)

calltrap:
#ifdef DIAGNOSTIC
	movl	CPUVAR(ILEVEL),%ebx
#endif /* DIAGNOSTIC */
	movq	%rsp,%rdi
	incq	CPUVAR(NTRAP)
	call	_C_LABEL(trap)
.Lalltraps_checkusr:
	testb	$SEL_RPL,TF_CS(%rsp)
	jz	6f
.Lalltraps_checkast:
	movq	CPUVAR(CURLWP),%r14
	/* Check for ASTs on exit to user mode. */
	CLI(si)
	CHECK_ASTPENDING(%r14)
	je	3f
	CLEAR_ASTPENDING(%r14)
	STI(si)
	movl	$T_ASTFLT,TF_TRAPNO(%rsp)
	movq	%rsp,%rdi
	incq	CPUVAR(NTRAP)
	call	_C_LABEL(trap)
	jmp	.Lalltraps_checkast	/* re-check ASTs */
3:	CHECK_DEFERRED_SWITCH
	jnz	9f
#ifndef DIAGNOSTIC
6:	INTRFASTEXIT
#else /* DIAGNOSTIC */
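	/*
	 * DIAGNOSTIC kernels check that the SPL recorded in %ebx at entry
	 * matches the current level on the way out; if not, print a
	 * warning, lower the SPL and re-check for ASTs before returning.
	 */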
6:	cmpl	CPUVAR(ILEVEL),%ebx
	jne	3f
	INTRFASTEXIT
3:	STI(si)
	movabsq	$4f,%rdi
	movl	CPUVAR(ILEVEL),%esi
	movl	%ebx,%edx
	xorq	%rax,%rax
	call	_C_LABEL(printf)
	movl	%ebx,%edi
	call	_C_LABEL(spllower)
	jmp	.Lalltraps_checkast
4:	.asciz	"WARNING: SPL NOT LOWERED ON TRAP EXIT %x %x\n"
#endif /* DIAGNOSTIC */
9:	STI(si)
	call	_C_LABEL(do_pmap_load)
	jmp	.Lalltraps_checkast	/* re-check ASTs */
END(alltraps)
