/*	$NetBSD: i386_trap.S,v 1.23 2022/09/07 00:40:18 knakahara Exp $	*/

/*
 * Copyright 2002 (c) Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1998, 2007, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * RCSID kept under "#if 0": NOTE(review) — presumably this file is
 * #included from another .S file that already emits its own RCSID;
 * confirm against the including file.
 */
#if 0
#include <machine/asm.h>
__KERNEL_RCSID(0, "$NetBSD: i386_trap.S,v 1.23 2022/09/07 00:40:18 knakahara Exp $");
#endif

/*
 * Trap and fault vector routines
 *
 * On exit from the kernel to user mode, we always need to check for ASTs.  In
 * addition, we need to do this atomically; otherwise an interrupt may occur
 * which causes an AST, but it won't get processed until the next kernel entry
 * (possibly the next clock tick).  Thus, we disable interrupt before checking,
 * and only enable them again on the final `iret' or before calling the AST
 * handler.
 */

/*
 * TRAP(a): push trap number (a) and enter common trap handling; used for
 * exceptions where the CPU already pushed an error code.
 * ZTRAP(a): as TRAP, but push a dummy 0 error code first, for exceptions
 * that do not push one.
 */
#define TRAP(a)		pushl $(a) ; jmp _C_LABEL(alltraps)
#define ZTRAP(a)	pushl $0 ; TRAP(a)

86	.text
87IDTVEC(trap00)
88	ZTRAP(T_DIVIDE)
89IDTVEC_END(trap00)
90
/*
 * Vector 0x01 (#DB): debug exception.
 *
 * Handle the SS shadow, CVE-2018-8897.
 *
 * We scan the IDT to determine if we hit an entry point. If so, we leave
 * without restoring the segregs, because we could fault while doing that.
 */
IDTVEC(trap01)
#ifndef XENPV
	pushl	$0
	pushl	$T_TRCTRAP
	INTRENTRY

	/* #DB from user mode is never an SS shadow; handle normally. */
	testb	$SEL_UPL,TF_CS(%esp)
	jnz	.Lnormal_dbentry

	pushl	%esp
	call	ss_shadow
	addl	$4,%esp

	cmpl	$1,%eax
	jne	.Lnormal_dbentry

	/* SS shadow, ignore the exception. */
	xorl	%eax,%eax
	movl	%eax,%dr6

	/* INTRFASTEXIT, but without segregs. */
	movl	TF_EDI(%esp),%edi
	movl	TF_ESI(%esp),%esi
	movl	TF_EBP(%esp),%ebp
	movl	TF_EBX(%esp),%ebx
	movl	TF_EDX(%esp),%edx
	movl	TF_ECX(%esp),%ecx
	movl	TF_EAX(%esp),%eax
	addl	$(TF_PUSHSIZE+8),%esp	/* drop pushed regs + trapno/err */
	iret

.Lnormal_dbentry:
	STI(%eax)
	jmp _C_LABEL(calltrap)
#else
	ZTRAP(T_TRCTRAP)
#endif
IDTVEC_END(trap01)

/*
 * Vector 0x02: Non Maskable Interrupt.
 *
 * Non Maskable Interrupts are a special case: they can be triggered even
 * with interrupts disabled, and once triggered they block further NMIs
 * until an 'iret' instruction is executed.
 *
 * Therefore we don't enable interrupts, because the CPU could switch to
 * another LWP, call 'iret' and unintentionally leave the NMI mode.
 */
IDTVEC(trap02)
	pushl	$0
	pushl	$(T_NMI)
	INTRENTRY

	addl	$1,CPUVAR(NTRAP)	/* statistical info (64-bit counter) */
	adcl	$0,CPUVAR(NTRAP)+4
	pushl	%esp
	call	_C_LABEL(trap)
	addl	$4,%esp

	INTRFASTEXIT
IDTVEC_END(trap02)

/* Vector 0x03 (#BP): breakpoint (int3). */
IDTVEC(trap03)
	ZTRAP(T_BPTFLT)
IDTVEC_END(trap03)

/* Vector 0x04 (#OF): overflow (INTO). */
IDTVEC(trap04)
	ZTRAP(T_OFLOW)
IDTVEC_END(trap04)

/* Vector 0x05 (#BR): BOUND range exceeded. */
IDTVEC(trap05)
	ZTRAP(T_BOUND)
IDTVEC_END(trap05)

/*
 * Vector 0x06 (#UD): privileged instruction fault.
 *
 * With KDTRACE_HOOKS, kernel-mode #UD may be a DTrace FBT probe point;
 * give the registered DTrace hook first refusal before normal handling.
 */
#ifdef KDTRACE_HOOKS
	SUPERALIGN_TEXT
IDTVEC(trap06)
	/* Check if there is no DTrace hook registered. */
	cmpl	$0,%ss:dtrace_invop_jump_addr
	je	norm_ill

	/* Check if this is a user fault. */
	/* XXX this was 0x0020 in FreeBSD */
	cmpl	$GSEL(GCODE_SEL, SEL_KPL),4(%esp) /* Check code segment. */

	/* If so, just handle it as a normal trap. */
	jne	norm_ill

	/*
	 * This is a kernel instruction fault that might have been caused
	 * by a DTrace provider.
	 */

	/*
	 * Set our jump address for the jump back in the event that
	 * the exception wasn't caused by DTrace at all.
	 */
	movl	$norm_ill,dtrace_invop_calltrap_addr

	/* Jump to the code hooked in by DTrace. */
	jmpl	*dtrace_invop_jump_addr

	/*
	 * Process the instruction fault in the normal way.
	 */
norm_ill:
	ZTRAP(T_PRIVINFLT)
IDTVEC_END(trap06)
#else
IDTVEC(trap06)
	ZTRAP(T_PRIVINFLT)
IDTVEC_END(trap06)
#endif

/*
 * Vector 0x07 (#NM): device not available — FPU used while unavailable;
 * fpudna() performs the (lazy) FPU state handling, then we take the
 * normal return-to-user AST path.
 */
IDTVEC(trap07)
	pushl	$0			/* dummy error code */
	pushl	$T_DNA
	INTRENTRY
#ifdef DIAGNOSTIC
	movzbl	CPUVAR(ILEVEL),%ebx	/* %ebx = entry spl, checked at exit */
#endif
	pushl	%esp
	call	_C_LABEL(fpudna)
	addl	$4,%esp
	jmp	.Lalltraps_checkusr
IDTVEC_END(trap07)

/* Vector 0x08 (#DF): double fault (CPU pushes an error code). */
IDTVEC(trap08)
	TRAP(T_DOUBLEFLT)
IDTVEC_END(trap08)

/* Vector 0x09: coprocessor segment overrun (386/486-era). */
IDTVEC(trap09)
	ZTRAP(T_FPOPFLT)
IDTVEC_END(trap09)

/* Vector 0x0a (#TS): invalid TSS (CPU pushes an error code). */
IDTVEC(trap0a)
	TRAP(T_TSSFLT)
IDTVEC_END(trap0a)

/* Vector 0x0b (#NP): segment not present (CPU pushes an error code). */
IDTVEC(trap0b)
	TRAP(T_SEGNPFLT)
IDTVEC_END(trap0b)

/* Vector 0x0c (#SS): stack segment fault (CPU pushes an error code). */
IDTVEC(trap0c)
	TRAP(T_STKFLT)
IDTVEC_END(trap0c)

/* Vector 0x0d (#GP): general protection fault (CPU pushes an error code). */
IDTVEC(trap0d)
	TRAP(T_PROTFLT)
IDTVEC_END(trap0d)

/*
 * Vector 0x0e (#PF): page fault (CPU pushes an error code).
 *
 * A kernel-mode fault whose address lands on IDT entry 6 (offset 6*8
 * from pentium_idt) is remapped to T_PRIVINFLT — NOTE(review): this
 * looks like the Pentium F00F workaround (read-only aliased IDT);
 * confirm against the IDT setup code.
 */
IDTVEC(trap0e)
#ifndef XENPV
	pushl	$T_PAGEFLT
	INTRENTRY
	STI(%eax)
	testb	$PGEX_U,TF_ERR(%esp)	/* user-mode fault? plain page fault */
	jnz	calltrap
	movl	%cr2,%eax		/* faulting address */
	subl	_C_LABEL(pentium_idt),%eax
	cmpl	$(6*8),%eax		/* IDT entry 6 (#UD)? */
	jne	calltrap
	movb	$T_PRIVINFLT,TF_TRAPNO(%esp)
	jmp	calltrap
#else
	TRAP(T_PAGEFLT)
#endif
IDTVEC_END(trap0e)

/* Vector 0x0f: spurious interrupt / reserved — ignored. */
IDTVEC(intrspurious)
IDTVEC(trap0f)
	/*
	 * The Pentium Pro local APIC may erroneously call this vector for a
	 * default IR7.  Just ignore it.
	 *
	 * (The local APIC does this when CPL is raised while it's on the
	 * way to delivering an interrupt.. presumably enough has been set
	 * up that it's inconvenient to abort delivery completely..)
	 */
	pushl	$0			/* dummy error code */
	pushl	$T_ASTFLT
	INTRENTRY
	STI(%eax)
#ifdef DIAGNOSTIC
	movzbl	CPUVAR(ILEVEL),%ebx	/* %ebx = entry spl, checked at exit */
#endif
	jmp	.Lalltraps_checkusr
IDTVEC_END(trap0f)
IDTVEC_END(intrspurious)

/* Vector 0x10 (#MF): x87 floating-point error. */
IDTVEC(trap10)
	/*
	 * Handle like an interrupt so that we can call npxintr to clear the
	 * error.  It would be better to handle npx interrupts as traps but
	 * this is difficult for nested interrupts.
	 */
	pushl	$0			/* dummy error code */
	pushl	$T_ARITHTRAP
.Ldo_fputrap:				/* shared with trap13 (#XM) */
	INTRENTRY
	movzbl	CPUVAR(ILEVEL),%ebx
	pushl	%esp
	addl	$1,CPUVAR(NTRAP)	/* statistical info */
	adcl	$0,CPUVAR(NTRAP)+4
	call	_C_LABEL(fputrap)
	addl	$4,%esp
	jmp	.Lalltraps_checkusr
IDTVEC_END(trap10)

/* Vector 0x11 (#AC): alignment check (CPU pushes an error code). */
IDTVEC(trap11)
	TRAP(T_ALIGNFLT)
IDTVEC_END(trap11)

/* Vector 0x12 (#MC): machine check. */
IDTVEC(trap12)
	ZTRAP(T_MCA)
IDTVEC_END(trap12)

/* Vector 0x13 (#XM): SIMD FP exception; joins trap10's .Ldo_fputrap path. */
IDTVEC(trap13)
	pushl	$0			/* dummy error code */
	pushl	$T_XMM
	jmp	.Ldo_fputrap
IDTVEC_END(trap13)

/*
 * Vectors 0x14-0x1f: reserved by Intel; all share a single T_RESERVED
 * entry point.  The IDTVEC_END list mirrors (in reverse) exactly the
 * IDTVEC opens above; trap11-trap13 were already closed at their own
 * definitions, so they must not be re-closed here.
 */
IDTVEC(trap14)
IDTVEC(trap15)
IDTVEC(trap16)
IDTVEC(trap17)
IDTVEC(trap18)
IDTVEC(trap19)
IDTVEC(trap1a)
IDTVEC(trap1b)
IDTVEC(trap1c)
IDTVEC(trap1d)
IDTVEC(trap1e)
IDTVEC(trap1f)
	/* 20 - 31 reserved for future exp */
	ZTRAP(T_RESERVED)
IDTVEC_END(trap1f)
IDTVEC_END(trap1e)
IDTVEC_END(trap1d)
IDTVEC_END(trap1c)
IDTVEC_END(trap1b)
IDTVEC_END(trap1a)
IDTVEC_END(trap19)
IDTVEC_END(trap18)
IDTVEC_END(trap17)
IDTVEC_END(trap16)
IDTVEC_END(trap15)
IDTVEC_END(trap14)

/* Table of the 32 exception entry points, indexed by vector number. */
IDTVEC(exceptions)
	.long	_C_LABEL(Xtrap00), _C_LABEL(Xtrap01)
	.long	_C_LABEL(Xtrap02), _C_LABEL(Xtrap03)
	.long	_C_LABEL(Xtrap04), _C_LABEL(Xtrap05)
	.long	_C_LABEL(Xtrap06), _C_LABEL(Xtrap07)
	.long	_C_LABEL(Xtrap08), _C_LABEL(Xtrap09)
	.long	_C_LABEL(Xtrap0a), _C_LABEL(Xtrap0b)
	.long	_C_LABEL(Xtrap0c), _C_LABEL(Xtrap0d)
	.long	_C_LABEL(Xtrap0e), _C_LABEL(Xtrap0f)
	.long	_C_LABEL(Xtrap10), _C_LABEL(Xtrap11)
	.long	_C_LABEL(Xtrap12), _C_LABEL(Xtrap13)
	.long	_C_LABEL(Xtrap14), _C_LABEL(Xtrap15)
	.long	_C_LABEL(Xtrap16), _C_LABEL(Xtrap17)
	.long	_C_LABEL(Xtrap18), _C_LABEL(Xtrap19)
	.long	_C_LABEL(Xtrap1a), _C_LABEL(Xtrap1b)
	.long	_C_LABEL(Xtrap1c), _C_LABEL(Xtrap1d)
	.long	_C_LABEL(Xtrap1e), _C_LABEL(Xtrap1f)
IDTVEC_END(exceptions)


/*
 * Double-fault entry via task gate: extract the previous task's TSS
 * selector (link field at offset 0 of the current TSS) and hand both
 * TSS pointers to trap_tss().  The trailing jmp 1b restarts the handler
 * if the iret somehow returns.
 */
IDTVEC(tss_trap08)
1:
	str	%ax			/* %ax = current task register */
	GET_TSS
	movzwl	(%eax),%eax		/* back-link: previous TSS selector */
	GET_TSS
	pushl	$T_DOUBLEFLT
	pushl	%eax
	call	_C_LABEL(trap_tss)
	addl	$12,%esp
	iret
	jmp	1b
IDTVEC_END(tss_trap08)

/*
 * trap() calls here when it detects a fault in INTRFASTEXIT (loading the
 * segment registers or during the iret itself).
 * The address of the (possibly reconstructed) user trap frame is
 * passed as an argument.
 * Typically the code will have raised a SIGSEGV which will be actioned
 * by the code below.
 */
	.type	_C_LABEL(trap_return_fault_return),@function
LABEL(trap_return_fault_return)
	mov	4(%esp),%esp	/* frame for user return */
	jmp	.Lalltraps_checkusr
END(trap_return_fault_return)

/* LINTSTUB: Ignore */
/*
 * Common trap handling: save state (INTRENTRY), call trap(), then on the
 * way back to user mode atomically check for ASTs, deferred pmap switches
 * and deferred FPU work before the final INTRFASTEXIT.
 */
ENTRY(alltraps)
	INTRENTRY
	STI(%eax)

calltrap:
#ifdef DIAGNOSTIC
	movzbl	CPUVAR(ILEVEL),%ebx	/* %ebx = entry spl, checked at exit */
#endif
	addl	$1,CPUVAR(NTRAP)	/* statistical info */
	adcl	$0,CPUVAR(NTRAP)+4
	pushl	%esp
	call	_C_LABEL(trap)
	addl	$4,%esp

.Lalltraps_checkusr:
	testb	$CHK_UPL,TF_CS(%esp)	/* returning to user mode? */
	jnz	.Lalltraps_checkast
	jmp	6f			/* kernel return: no AST check */

.Lalltraps_checkast:
	/* Check for ASTs on exit to user mode. */
	CLI(%eax)
	CHECK_ASTPENDING(%eax)
	jz	3f
	CLEAR_ASTPENDING(%eax)
	STI(%eax)
	movl	$T_ASTFLT,TF_TRAPNO(%esp)
	addl	$1,CPUVAR(NTRAP)	/* statistical info */
	adcl	$0,CPUVAR(NTRAP)+4
	pushl	%esp
	call	_C_LABEL(trap)
	addl	$4,%esp
	jmp	.Lalltraps_checkast	/* re-check ASTs */
3:	CHECK_DEFERRED_SWITCH
	jnz	9f

	HANDLE_DEFERRED_FPU

#ifdef XENPV
	STIC(%eax)
	jz	22f
	call	_C_LABEL(stipending)
	testl	%eax,%eax
	jz	22f
	/* process pending interrupts */
	CLI(%eax)
	movzbl	CPUVAR(ILEVEL),%ebx
	movl	$.Lalltraps_resume,%esi /* address to resume loop at */
.Lalltraps_resume:
	movl	%ebx,%eax		/* get cpl */
	movl	CPUVAR(IUNMASK)(,%eax,4),%eax
	andl	CPUVAR(IPENDING),%eax	/* any non-masked bits left? */
	jz	11f
	bsrl	%eax,%eax		/* highest-priority pending source */
	btrl	%eax,CPUVAR(IPENDING)
	movl	CPUVAR(ISOURCES)(,%eax,4),%eax
	jmp	*IS_RESUME(%eax)
11:	movb	%bl,CPUVAR(ILEVEL)	/* restore cpl */
	jmp	.Lalltraps_checkusr
22:
#endif /* XENPV */

6:
#ifdef DIAGNOSTIC
	cmpb	CPUVAR(ILEVEL),%bl	/* spl must match trap entry */
	jne	.Lspl_error
#endif
	INTRFASTEXIT

9:	STI(%eax)
	call	_C_LABEL(pmap_load)	/* perform the deferred pmap switch */
	jmp	.Lalltraps_checkast	/* re-check ASTs */

#ifdef DIAGNOSTIC
.Lspl_error:
	STI(%eax)
	pushl	$4f
	call	_C_LABEL(panic)
	addl	$4,%esp
	pushl	%ebx
	call	_C_LABEL(spllower)
	addl	$4,%esp
	jmp	.Lalltraps_checkast	/* re-check ASTs */
4:	.asciz	"SPL NOT LOWERED ON TRAP EXIT\n"
#endif
END(alltraps)
