xref: /freebsd/sys/i386/i386/exception.S (revision 9768746b)
/*-
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * Copyright (c) 2007, 2018 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_apic.h"
#include "opt_atpic.h"
#include "opt_hwpmc_hooks.h"
#include "opt_hyperv.h"

#include "assym.inc"

#include <machine/psl.h>
#include <machine/asmacros.h>
#include <machine/trap.h>

#ifdef KDTRACE_HOOKS
	.bss
	.globl	dtrace_invop_jump_addr
	.align	4
	.type	dtrace_invop_jump_addr, @object
	.size	dtrace_invop_jump_addr, 4
dtrace_invop_jump_addr:
	.zero	4
	.globl	dtrace_invop_calltrap_addr
	.align	4
	.type	dtrace_invop_calltrap_addr, @object
	.size	dtrace_invop_calltrap_addr, 4
dtrace_invop_calltrap_addr:
	.zero	8
#endif
	.text
ENTRY(start_exceptions)
	.globl	tramp_idleptd
tramp_idleptd:	.long	0

/*****************************************************************************/
/* Trap handling                                                             */
/*****************************************************************************/
/*
 * Trap and fault vector routines.
 *
 * All traps are 'interrupt gates', SDT_SYS386IGT.  Interrupts are disabled
 * by the hardware so that no interrupt can be taken until the code has
 * switched to the kernel address space and the kernel thread stack.
 *
 * The cpu will push a certain amount of state onto the kernel stack for
 * the current process.  The amount of state depends on the type of trap
 * and whether the trap crossed rings or not.  See i386/include/frame.h.
 * At the very least the current EFLAGS (status register, which includes
 * the interrupt disable state prior to the trap), the code segment register,
 * and the return instruction pointer are pushed by the cpu.  The cpu
 * will also push an 'error' code for certain traps.  We push a dummy
 * error code for those traps where the cpu doesn't push one, in order to
 * maintain a consistent frame.  We also push a contrived 'trap number'.
 *
 * The cpu does not push the general registers; we must do that, and we
 * must restore them prior to calling 'iret'.  The cpu adjusts the %cs and
 * %ss segment registers, but does not mess with %ds, %es, or %fs.  Thus we
 * must load them with appropriate values for supervisor mode operation.
 *
 * This code is not executed at the linked address; it is copied to the
 * trampoline area.  As a consequence, all code there and in included files
 * must be PIC.
 */
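
/*
 * Editorial sketch, for orientation only: the frame built by this file,
 * from higher to lower addresses.  The authoritative layout is struct
 * trapframe in i386/include/frame.h and the TF_* offsets come from
 * assym.inc, so treat the list below as illustrative.
 *
 *	tf_ss, tf_esp	pushed by the cpu only when the trap crossed rings
 *	tf_eflags	pushed by the cpu
 *	tf_cs		pushed by the cpu
 *	tf_eip		pushed by the cpu
 *	tf_err		pushed by the cpu for some traps, by us otherwise
 *	tf_trapno	always pushed by this code
 *	tf_eax..tf_edi	general registers, pushed by PUSH_FRAME2 (pushal)
 *	tf_ds, tf_es, tf_fs	segment registers, pushed by PUSH_FRAME2
 *			<- %esp once the frame is complete (lowest address)
 */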

#define	TRAP(a)		pushl $(a) ; jmp alltraps

IDTVEC(div)
	pushl $0; TRAP(T_DIVIDE)
IDTVEC(bpt)
	pushl $0; TRAP(T_BPTFLT)
IDTVEC(dtrace_ret)
	pushl $0; TRAP(T_DTRACE_RET)
IDTVEC(ofl)
	pushl $0; TRAP(T_OFLOW)
IDTVEC(bnd)
	pushl $0; TRAP(T_BOUND)
#ifndef KDTRACE_HOOKS
IDTVEC(ill)
	pushl $0; TRAP(T_PRIVINFLT)
#endif
IDTVEC(dna)
	pushl $0; TRAP(T_DNA)
IDTVEC(fpusegm)
	pushl $0; TRAP(T_FPOPFLT)
IDTVEC(tss)
	TRAP(T_TSSFLT)
IDTVEC(missing)
	pushl	$T_SEGNPFLT
	jmp	irettraps
IDTVEC(stk)
	pushl	$T_STKFLT
	jmp	irettraps
IDTVEC(prot)
	pushl	$T_PROTFLT
	jmp	irettraps
IDTVEC(page)
	testl	$PSL_VM, TF_EFLAGS-TF_ERR(%esp)
	jnz	upf
	testb	$SEL_RPL_MASK, TF_CS-TF_ERR(%esp)
	jnz	upf
	cmpl	$PMAP_TRM_MIN_ADDRESS, TF_EIP-TF_ERR(%esp)
	jb	upf

	/*
	 * This is a handshake between copyout_fast.s and the page fault
	 * handler.  We check for a page fault occurring at one of the
	 * special places in the copyout fast path, where a page fault
	 * can legitimately happen while accessing either user space or
	 * kernel pageable memory, and return control to *%edx.
	 * We switch to the idleptd page table from a user page table,
	 * if needed.
	 */
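	/*
	 * Editorial note: this code runs in the trampoline, away from
	 * its linked address, so absolute symbol values cannot be
	 * compared with run-time addresses directly.  The helpers at
	 * 5: and 6: below convert between the two: a call pushes the
	 * run-time address of the label that follows it, so 5:
	 * (subtract) maps the faulting %eip back to its linked address
	 * for the pf_x/pf_y comparisons, and 6: (add) maps the linked
	 * address of tramp_idleptd to its run-time location.
	 */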
	pushl	%eax
	movl	TF_EIP-TF_ERR+4(%esp), %eax
	addl	$1f, %eax
	call	5f
1:	cmpl	$pf_x1, %eax
	je	2f
	cmpl	$pf_x2, %eax
	je	2f
	cmpl	$pf_x3, %eax
	je	2f
	cmpl	$pf_x4, %eax
	je	2f
	cmpl	$pf_x5, %eax
	je	2f
	cmpl	$pf_x6, %eax
	je	2f
	cmpl	$pf_x7, %eax
	je	2f
	cmpl	$pf_x8, %eax
	je	2f
	cmpl	$pf_y1, %eax
	je	4f
	cmpl	$pf_y2, %eax
	je	4f
	jmp	upf_eax
2:	movl	$tramp_idleptd, %eax
	subl	$3f, %eax
	call	6f
3:	movl	(%eax), %eax
	movl	%eax, %cr3
4:	popl	%eax
	movl	%edx, TF_EIP-TF_ERR(%esp)
	addl	$4, %esp
	iret
5:	subl	(%esp), %eax
	retl
6:	addl	(%esp), %eax
	retl

upf_eax:popl	%eax
upf:	pushl	$T_PAGEFLT
	jmp	alltraps
IDTVEC(rsvd_pti)
IDTVEC(rsvd)
	pushl $0; TRAP(T_RESERVED)
IDTVEC(fpu)
	pushl $0; TRAP(T_ARITHTRAP)
IDTVEC(align)
	TRAP(T_ALIGNFLT)
IDTVEC(xmm)
	pushl $0; TRAP(T_XMMFLT)

	/*
	 * All traps except those for syscalls and invalid segments
	 * jump to alltraps.  If interrupts were enabled when the trap
	 * occurred, then they are enabled now if the trap was through
	 * a trap gate, and disabled if it was through an interrupt
	 * gate.  Note that int0x80_syscall is a trap gate.  Interrupt
	 * gates are used by page faults, non-maskable interrupts, and
	 * the debug and breakpoint exceptions.
	 */
	SUPERALIGN_TEXT
	.globl	alltraps
	.type	alltraps,@function
alltraps:
	PUSH_FRAME2
alltraps_with_regs_pushed:
	SET_KERNEL_SREGS
	cld
	KENTER
calltrap:
	pushl	%esp
	movl	$trap,%eax
	call	*%eax
	add	$4, %esp

	/*
	 * Return via doreti to handle ASTs.
	 */
	jmp	doreti

	.globl	irettraps
	.type	irettraps,@function
irettraps:
	testl	$PSL_VM, TF_EFLAGS-TF_TRAPNO(%esp)
	jnz	alltraps
	testb	$SEL_RPL_MASK, TF_CS-TF_TRAPNO(%esp)
	jnz	alltraps

	/*
	 * Kernel mode.
	 * The special case here is kernel mode with the user %cr3 and
	 * the trampoline stack.  We need to copy both the current frame
	 * and the hardware portion of the frame we tried to return to,
	 * to the normal stack.  This logic must follow the stack unwind
	 * order in doreti.
	 */
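	/*
	 * Editorial note on the byte counts below: the fault was taken
	 * in kernel mode, so the cpu pushed no %esp/%ss for the current
	 * frame, which therefore occupies TF_SZ - 8 bytes; the
	 * not-yet-unwound tail of the interrupted frame runs from the
	 * offset doreti had reached (TF_EIP, TF_DS, TF_ES or TF_FS) up
	 * to TF_SZ.  Their sum gives 2 * TF_SZ - offset - 8.
	 */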
	PUSH_FRAME2
	SET_KERNEL_SREGS
	cld
	call	1f
1:	popl	%ebx
	leal	(doreti_iret - 1b)(%ebx), %edx
	cmpl	%edx, TF_EIP(%esp)
	jne	2f
	/* -8 because exception did not switch ring */
	movl	$(2 * TF_SZ - TF_EIP - 8), %ecx
	jmp	5f
2:	leal	(doreti_popl_ds - 1b)(%ebx), %edx
	cmpl	%edx, TF_EIP(%esp)
	jne	3f
	movl	$(2 * TF_SZ - TF_DS - 8), %ecx
	jmp	5f
3:	leal	(doreti_popl_es - 1b)(%ebx), %edx
	cmpl	%edx, TF_EIP(%esp)
	jne	4f
	movl	$(2 * TF_SZ - TF_ES - 8), %ecx
	jmp	5f
4:	leal	(doreti_popl_fs - 1b)(%ebx), %edx
	cmpl	%edx, TF_EIP(%esp)
	jne	calltrap
	movl	$(2 * TF_SZ - TF_FS - 8), %ecx
5:	cmpl	$PMAP_TRM_MIN_ADDRESS, %esp	/* trampoline stack ? */
	jb	calltrap	  /* if not, no need to change stacks */
	movl	(tramp_idleptd - 1b)(%ebx), %eax
	movl	%eax, %cr3
	movl	PCPU(KESP0), %edx
	subl	%ecx, %edx
	movl	%edx, %edi
	movl	%esp, %esi
	rep; movsb
	movl	%edx, %esp
	/* kernel mode, normal */
	jmp	calltrap

/*
 * Privileged instruction fault.
 */
#ifdef KDTRACE_HOOKS
	SUPERALIGN_TEXT
IDTVEC(ill)
	/*
	 * Check if this is a user fault.  If so, just handle it as a normal
	 * trap.
	 */
	testl	$PSL_VM, 8(%esp)	/* and vm86 mode. */
	jnz	norm_ill
	cmpl	$GSEL_KPL, 4(%esp)	/* Check the code segment */
	jne	norm_ill

	/*
	 * Check if a DTrace hook is registered.  The trampoline cannot
	 * be instrumented.
	 */
	cmpl	$0, dtrace_invop_jump_addr
	je	norm_ill

	/*
	 * This is a kernel instruction fault that might have been caused
	 * by a DTrace provider.
	 */
	pushal
	cld

	/*
	 * Set our jump address for the jump back in the event that
	 * the exception wasn't caused by DTrace at all.
	 */
	movl	$norm_ill, dtrace_invop_calltrap_addr

	/* Jump to the code hooked in by DTrace. */
	jmpl	*dtrace_invop_jump_addr

	/*
	 * Process the instruction fault in the normal way.
	 */
norm_ill:
	pushl	$0
	pushl	$T_PRIVINFLT
	jmp	alltraps
#endif

/*
 * See the comment in the handler for the kernel-mode T_TRCTRAP case in
 * trap.c.  The exception handler must be ready to execute with the wrong
 * %cr3.  We save the original %cr3 in frame->tf_err, similarly to the
 * NMI and MCE handlers.
 */
IDTVEC(dbg)
	pushl	$0
	pushl	$T_TRCTRAP
	PUSH_FRAME2
	SET_KERNEL_SREGS
	cld
	movl	%cr3, %eax
	movl	%eax, TF_ERR(%esp)
	call	1f
1:	popl	%eax
	movl	(tramp_idleptd - 1b)(%eax), %eax
	movl	%eax, %cr3
	testl	$PSL_VM, TF_EFLAGS(%esp)
	jnz	dbg_user
	testb	$SEL_RPL_MASK,TF_CS(%esp)
	jz	calltrap
dbg_user:
	NMOVE_STACKS
	movl	$handle_ibrs_entry,%eax
	call	*%eax
	pushl	%esp
	movl	$trap,%eax
	call	*%eax
	add	$4, %esp
	movl	$T_RESERVED, TF_TRAPNO(%esp)
	jmp	doreti

IDTVEC(mchk)
	pushl	$0
	pushl	$T_MCHK
	jmp	nmi_mchk_common

IDTVEC(nmi)
	pushl	$0
	pushl	$T_NMI
nmi_mchk_common:
	PUSH_FRAME2
	SET_KERNEL_SREGS
	cld
	/*
	 * Save %cr3 into tf_err.  There is no good place to put it.
	 * Always reload %cr3, since we might have interrupted the
	 * kernel entry or exit.
	 * Do not switch to the thread kernel stack, otherwise we might
	 * obliterate the previous context partially copied from the
	 * trampoline stack.
	 * Do not re-enable IBRS, there is no good place to store the
	 * previous state if we came from the kernel.
	 */
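	/*
	 * (Editorial cross-reference: the %cr3 value saved in tf_err
	 * here is restored by doreti_iret_nmi near the end of this
	 * file, just before the final register pops.)
	 */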
	movl	%cr3, %eax
	movl	%eax, TF_ERR(%esp)
	call	1f
1:	popl	%eax
	movl	(tramp_idleptd - 1b)(%eax), %eax
	movl	%eax, %cr3
	jmp	calltrap

/*
 * Trap gate entry for syscalls (int 0x80).
 * This is used by FreeBSD ELF executables, "new" a.out executables, and all
 * Linux executables.
 *
 * Even though the name says 'int0x80', this is actually a trap gate, not an
 * interrupt gate.  Thus interrupts are enabled on entry just as they are for
 * a normal syscall.
 */
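
/*
 * Illustrative only (not part of this file): a minimal sketch of how
 * userland reaches this entry point on FreeBSD/i386.  The syscall
 * number goes in %eax and the arguments live on the stack, above one
 * extra word where a libc wrapper's return address would normally sit,
 * hence the dummy push.  The symbols msg and len are hypothetical.
 *
 *	pushl	$len		# write(2) arguments, pushed right to left
 *	pushl	$msg
 *	pushl	$1		# fd 1: stdout
 *	movl	$4, %eax	# SYS_write
 *	pushl	%eax		# dummy return-address slot
 *	int	$0x80
 *	addl	$16, %esp
 */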
	SUPERALIGN_TEXT
IDTVEC(int0x80_syscall)
	pushl	$2			/* sizeof "int 0x80" */
	pushl	$0			/* tf_trapno */
	PUSH_FRAME2
	SET_KERNEL_SREGS
	cld
	MOVE_STACKS
	movl	$handle_ibrs_entry,%eax
	call	*%eax
	sti
	pushl	%esp
	movl	$syscall, %eax
	call	*%eax
	add	$4, %esp
	jmp	doreti

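/*
 * Editorial note, an assumption from the MD fork code rather than this
 * file: cpu_fork() arranges the new thread's pcb so that it first runs
 * here, with %esi holding the function to pass to fork_exit() (normally
 * fork_return) and %ebx its argument, while the trapframe for the
 * eventual return to usermode already sits on the stack.
 */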
ENTRY(fork_trampoline)
	pushl	%esp			/* trapframe pointer */
	pushl	%ebx			/* arg1 */
	pushl	%esi			/* function */
	movl	$fork_exit, %eax
	call	*%eax
	addl	$12,%esp
	/* cut from syscall */

	/*
	 * Return via doreti to handle ASTs.
	 */
	jmp	doreti


	.data
	.p2align 4
	.text
	SUPERALIGN_TEXT

#ifdef DEV_ATPIC
#include <i386/i386/atpic_vector.S>
#endif

#if defined(DEV_APIC) && defined(DEV_ATPIC)
	.data
	.p2align 4
	.text
	SUPERALIGN_TEXT
#endif

#ifdef DEV_APIC
#include <i386/i386/apic_vector.S>
#endif

#ifdef HYPERV
	.data
	.p2align 4
	.text
	SUPERALIGN_TEXT
#include <dev/hyperv/vmbus/i386/vmbus_vector.S>
#endif

	.data
	.p2align 4
	.text
	SUPERALIGN_TEXT
#include <i386/i386/vm86bios.S>

	.text

#include <i386/i386/copyout_fast.S>

/*
 * void doreti(struct trapframe)
 *
 * Handle return from interrupts, traps and syscalls.
 */
	.text
	SUPERALIGN_TEXT
	.type	doreti,@function
	.globl	doreti
doreti:
doreti_next:
	/*
	 * Check if ASTs can be handled now.  ASTs cannot be safely
	 * processed when returning from an NMI.
	 */
	cmpb	$T_NMI,TF_TRAPNO(%esp)
#ifdef HWPMC_HOOKS
	je	doreti_nmi
#else
	je	doreti_exit
#endif
	/*
	 * PSL_VM must be checked first since segment registers only
	 * have an RPL in non-VM86 mode.
	 * ASTs cannot be handled now if we are in a vm86 call.
	 */
	testl	$PSL_VM,TF_EFLAGS(%esp)
	jz	doreti_notvm86
	movl	PCPU(CURPCB),%ecx
	testl	$PCB_VM86CALL,PCB_FLAGS(%ecx)
	jz	doreti_ast
	jmp	doreti_popl_fs

doreti_notvm86:
	testb	$SEL_RPL_MASK,TF_CS(%esp) /* are we returning to user mode? */
	jz	doreti_exit		/* can't handle ASTs now if not */

doreti_ast:
	/*
	 * Check for ASTs atomically with returning.  Disabling CPU
	 * interrupts provides sufficient locking even in the SMP case,
	 * since we will be informed of any new ASTs by an IPI.
	 */
	cli
	movl	PCPU(CURTHREAD),%eax
	cmpl	$0,TD_AST(%eax)
	je	doreti_exit
	sti
	pushl	%esp			/* pass a pointer to the trapframe */
	movl	$ast, %eax
	call	*%eax
	add	$4,%esp
	jmp	doreti_ast

	/*
	 * doreti_exit:	pop registers, iret.
	 *
	 *	The segment register pop is a special case, since it may
	 *	fault if (for example) a sigreturn specifies bad segment
	 *	registers.  The fault is handled in trap.c.
	 */
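	/*
	 * (Editorial summary of the code below: for a return to user
	 * mode or to a vm86 call, the frame is first copied onto the
	 * per-cpu trampoline stack and %cr3 is switched to the user
	 * page table from the pcb, so that the final pops and the iret
	 * execute from the trampoline with the user address space
	 * already active.  NMI, MCE and trace traps instead restore
	 * the %cr3 saved in tf_err and pop in place.)
	 */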
doreti_exit:
	cmpl	$T_NMI, TF_TRAPNO(%esp)
	je	doreti_iret_nmi
	cmpl	$T_MCHK, TF_TRAPNO(%esp)
	je	doreti_iret_nmi
	cmpl	$T_TRCTRAP, TF_TRAPNO(%esp)
	je	doreti_iret_nmi
	testl	$PSL_VM,TF_EFLAGS(%esp)
	jnz	1f			/* PCB_VM86CALL is not set */
	testl	$SEL_RPL_MASK, TF_CS(%esp)
	jz	doreti_popl_fs
1:	movl	$handle_ibrs_exit,%eax
	call	*%eax
	movl	mds_handler,%eax
	call	*%eax
	movl	%esp, %esi
	movl	PCPU(TRAMPSTK), %edx
	movl	$TF_SZ, %ecx
	testl	$PSL_VM,TF_EFLAGS(%esp)
	jz	2f			/* PCB_VM86CALL is not set */
	addl	$VM86_STACK_SPACE, %ecx
2:	subl	%ecx, %edx
	movl	%edx, %edi
	rep; movsb
	movl	%edx, %esp
	movl	PCPU(CURPCB),%eax
	movl	PCB_CR3(%eax), %eax
	movl	%eax, %cr3

	.globl	doreti_popl_fs
doreti_popl_fs:
	popl	%fs
	.globl	doreti_popl_es
doreti_popl_es:
	popl	%es
	.globl	doreti_popl_ds
doreti_popl_ds:
	popl	%ds
	popal
	addl	$8,%esp
	.globl	doreti_iret
doreti_iret:
	iret

doreti_iret_nmi:
	movl	TF_ERR(%esp), %eax
	movl	%eax, %cr3
	jmp	doreti_popl_fs

	/*
	 * doreti_iret_fault and friends.  Alternative return code for
	 * the case where we get a fault in the doreti_exit code
	 * above.  trap() (i386/i386/trap.c) catches this specific
	 * case, and continues in the corresponding place in the code
	 * below.
	 *
	 * If the fault occurred during return to usermode, we recreate
	 * the trap frame and call trap() to send a signal.  Otherwise
	 * the kernel was tricked into faulting by an attempt to restore
	 * invalid usermode segment selectors on return from a nested
	 * fault or interrupt, where the interrupted kernel entry code
	 * had not yet loaded the kernel selectors.  In the latter case,
	 * emulate iret and zero the invalid selector.
	 */
	ALIGN_TEXT
	.globl	doreti_iret_fault
doreti_iret_fault:
	pushl	$0	/* tf_err */
	pushl	$0	/* tf_trapno XXXKIB: provide more useful value ? */
	pushal
	pushl	$0
	movw	%ds,(%esp)
	.globl	doreti_popl_ds_fault
doreti_popl_ds_fault:
	testb	$SEL_RPL_MASK,TF_CS-TF_DS(%esp)
	jz	doreti_popl_ds_kfault
	pushl	$0
	movw	%es,(%esp)
	.globl	doreti_popl_es_fault
doreti_popl_es_fault:
	testb	$SEL_RPL_MASK,TF_CS-TF_ES(%esp)
	jz	doreti_popl_es_kfault
	pushl	$0
	movw	%fs,(%esp)
	.globl	doreti_popl_fs_fault
doreti_popl_fs_fault:
	testb	$SEL_RPL_MASK,TF_CS-TF_FS(%esp)
	jz	doreti_popl_fs_kfault
	movl	$0,TF_ERR(%esp)	/* XXX should be the error code */
	movl	$T_PROTFLT,TF_TRAPNO(%esp)
	SET_KERNEL_SREGS
	jmp	calltrap

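	/*
	 * Kernel-mode fault while popping a selector: the saved value
	 * was junk left over from a nested kernel entry, so replace it
	 * with a null selector and retry the pop from the matching
	 * point in the doreti_exit sequence above.
	 */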
doreti_popl_ds_kfault:
	movl	$0,(%esp)
	jmp	doreti_popl_ds
doreti_popl_es_kfault:
	movl	$0,(%esp)
	jmp	doreti_popl_es
doreti_popl_fs_kfault:
	movl	$0,(%esp)
	jmp	doreti_popl_fs

#ifdef HWPMC_HOOKS
doreti_nmi:
	/*
	 * Since we are returning from an NMI, check if the current trap
	 * was from user mode and if so whether the current thread
	 * needs a user call chain capture.
	 */
	testl	$PSL_VM, TF_EFLAGS(%esp)
	jnz	doreti_exit
	testb	$SEL_RPL_MASK,TF_CS(%esp)
	jz	doreti_exit
	movl	PCPU(CURTHREAD),%eax	/* curthread present? */
	orl	%eax,%eax
	jz	doreti_exit
	testl	$TDP_CALLCHAIN,TD_PFLAGS(%eax) /* flagged for capture? */
	jz	doreti_exit
	/*
	 * Switch to the thread stack.  Reset tf_trapno so that it no
	 * longer indicates an NMI, to cause a normal userspace exit.
	 */
	movl	$T_RESERVED, TF_TRAPNO(%esp)
	NMOVE_STACKS
	/*
	 * Take the processor out of NMI mode by executing a fake "iret".
	 */
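	/*
	 * (Editorial note: the cpu blocks further NMI delivery from
	 * the moment an NMI is taken until the next iret; returning to
	 * the very next instruction via this fake iret therefore
	 * re-arms NMIs while execution continues on the same stack.)
	 */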
	pushfl
	pushl	%cs
	call	1f
1:	popl	%eax
	leal	(outofnmi-1b)(%eax),%eax
	pushl	%eax
	iret
outofnmi:
	/*
	 * Call the callchain capture hook after turning interrupts back on.
	 */
	movl	pmc_hook,%ecx
	orl	%ecx,%ecx
	jz	doreti_exit
	pushl	%esp			/* frame pointer */
	pushl	$PMC_FN_USER_CALLCHAIN	/* command */
	movl	PCPU(CURTHREAD),%eax
	pushl	%eax			/* curthread */
	sti
	call	*%ecx
	addl	$12,%esp
	jmp	doreti_ast
#endif

ENTRY(end_exceptions)