/*	$NetBSD: vector.S,v 1.56 2011/01/12 23:12:12 joerg Exp $	*/

/*
 * Copyright 2002 (c) Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1998, 2007, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>
__KERNEL_RCSID(0, "$NetBSD: vector.S,v 1.56 2011/01/12 23:12:12 joerg Exp $");

#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_ipkdb.h"
#include "opt_vm86.h"
#include "opt_xen.h"
#include "opt_dtrace.h"

#include <machine/i8259.h>
#include <machine/i82093reg.h>
#include <machine/i82489reg.h>
#include <machine/frameasm.h>
#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/trap.h>
#ifdef XEN
#include <xen/xen.h>
#endif

#include "ioapic.h"
#include "lapic.h"

#include "npx.h"
#include "assym.h"

/*
 * Macros for interrupt entry, call to handler, and exit.
 *
 * XXX
 * The interrupt frame is set up to look like a trap frame.  This may be a
 * waste.  The only handler which needs a frame is the clock handler, and it
 * only needs a few bits.  Xdoreti() needs a trap frame for handling ASTs, but
 * it could easily convert the frame on demand.
 *
 * The direct costs of setting up a trap frame are two pushl's (error code and
 * trap number), an addl to get rid of these, and pushing and popping the
 * callee-saved registers %esi, %edi, %ebx, and %ebp twice.
 *
 * If the interrupt frame is made more flexible, INTR can push %eax first and
 * decide the ipending case with less overhead, e.g., by avoiding loading the
 * segment registers.
 *
 */

/*
 * Store address of TSS in %eax, given a selector in %eax.
 * Clobbers %eax, %ecx, %edx, but that's ok for its usage.
 * This is a bit complicated, but it's done to make as few
 * assumptions as possible about the validity of the environment.
 * The GDT and the current and previous TSS are known to be OK,
 * otherwise we would not be here. The only other thing that needs
 * to be OK is the cpu_info structure for the current CPU.
 */
#define GET_TSS \
	andl	$0xfff8,%eax				;\
	addl	CPUVAR(GDT),%eax			;\
	movl	2(%eax),%edx				;\
	andl	$0xffffff,%edx				;\
	movzbl	7(%eax),%eax				;\
	shl	$24,%eax				;\
	orl	%edx,%eax
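/*
 * The descriptor's 32-bit base is scattered across the GDT entry:
 * bits 0-23 live in bytes 2-4 and bits 24-31 in byte 7.  The code
 * above reassembles them into %eax.
 */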

#ifdef KDTRACE_HOOKS
	.bss
	.globl	dtrace_invop_jump_addr
	.align	4
	.type	dtrace_invop_jump_addr, @object
	.size	dtrace_invop_jump_addr, 4
dtrace_invop_jump_addr:
	.zero	4
	.globl	dtrace_invop_calltrap_addr
	.align	4
	.type	dtrace_invop_calltrap_addr, @object
	.size	dtrace_invop_calltrap_addr, 4
dtrace_invop_calltrap_addr:
	.zero	8
	.text
#endif

#ifndef XEN
#if NLAPIC > 0
#ifdef MULTIPROCESSOR
/*
 * General purpose IPI handler.
 */
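/*
 * The recurse entry below synthesizes a hardware-style interrupt
 * frame: pushfl/%cs stand in for the saved eflags and cs, and %esi
 * (loaded by spllower() with the address to resume at) stands in
 * for the saved eip.
 */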
IDTVEC(recurse_lapic_ipi)
	pushfl
	pushl	%cs
	pushl	%esi
	pushl	$0
	pushl	$T_ASTFLT
	INTRENTRY
	jmp	1f
IDTVEC_END(recurse_lapic_ipi)
IDTVEC(intr_lapic_ipi)
	pushl	$0
	pushl	$T_ASTFLT
	INTRENTRY
	movl	$0,_C_LABEL(local_apic)+LAPIC_EOI
	movl	CPUVAR(ILEVEL),%ebx
	cmpl	$IPL_HIGH,%ebx
	jae	2f
IDTVEC_END(intr_lapic_ipi)
IDTVEC(resume_lapic_ipi)
1:
	pushl	%ebx
	IDEPTH_INCR
	movl	$IPL_HIGH,CPUVAR(ILEVEL)
	sti
	call	_C_LABEL(x86_ipi_handler)
	cli
	jmp	_C_LABEL(Xdoreti)
2:
	orl	$(1 << LIR_IPI),CPUVAR(IPENDING)
	INTRFASTEXIT
IDTVEC_END(resume_lapic_ipi)

/*
 * Multicast TLB shootdown handler for !kernel_pmap.
 */
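/*
 * Only the registers this handler actually uses are saved below; it
 * returns with iret and never delivers ASTs, so a full trap frame
 * is unnecessary.
 */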
IDTVEC(intr_lapic_tlb_mcast)
	/* Save state. */
	pushl	%eax
	pushl	%ebx
	pushl	%ecx
	pushl	%edx
	pushl	%ds
	pushl	%fs
	movl	$GSEL(GDATA_SEL, SEL_KPL), %eax
	movl	$GSEL(GCPU_SEL, SEL_KPL), %edx
	movl	%eax, %ds
	movl	%edx, %fs
	/* Count it. */
	addl	$1, CPUVAR(TLB_EVCNT)+EV_COUNT
	adcl	$0, CPUVAR(TLB_EVCNT)+EV_COUNT+4
	/* Find out what we need to invalidate. */
	movl	CPUVAR(PMAP_CPU), %ecx
	movl	MB_ADDR1(%ecx), %eax
	movl	MB_ADDR2(%ecx), %edx
	xorl	%ebx, %ebx
	xchgl	MB_POINTER(%ecx), %ebx
	movl	$0, _C_LABEL(local_apic)+LAPIC_EOI
	cmpl	$-1, %eax
	je	4f
1:
	/* Invalidate a single page or a range of pages. */
	invlpg	(%eax)
	addl	$PAGE_SIZE, %eax
	cmpl	%edx, %eax
	jb	1b
2:
	/* Ack the request. */
	lock
	incl	(%ebx)
	/*
	 * Check the current TLB state.  If we don't want further
	 * invalidations for this pmap, then take the CPU out of
	 * the pmap's bitmask.
	 */
	cmpl	$TLBSTATE_LAZY, CPUVAR(TLBSTATE)
	jne	3f
	movl	CPUVAR(PMAP), %edx
	movl	CPUVAR(CPUMASK), %ecx
	notl	%ecx
	lock
	andl	%ecx, PM_CPUS(%edx)
	movl	$TLBSTATE_STALE, CPUVAR(TLBSTATE)
3:
	/* Restore state and return. */
	popl	%fs
	popl	%ds
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%eax
	iret
4:
	/*
	 * Get the emap generation number.  Invalidate user TLB entries.
	 * Perform the emap update, passing the generation number.  Note
	 * that caller-save registers might be modified (they were all
	 * saved at the beginning).  Only the %ebx value is used by the
	 * code at 2b.
	 */
	call	_C_LABEL(uvm_emap_gen_return)
	movl	%eax, %edx
	movl	%cr3, %eax
	movl	%eax, %cr3
	pushl	%edx
	call	_C_LABEL(uvm_emap_update)
	addl	$4, %esp
	jmp	2b
IDTVEC_END(intr_lapic_tlb_mcast)

/*
 * Broadcast TLB shootdown handler for kernel_pmap.
 */
IDTVEC(intr_lapic_tlb_bcast)
	/* Save state and ack the interrupt. */
	pushl	%eax
	pushl	%ebx
	pushl	%ecx
	pushl	%edx
	pushl	%ds
	pushl	%fs
	movl	$GSEL(GDATA_SEL, SEL_KPL), %eax
	movl	$GSEL(GCPU_SEL, SEL_KPL), %edx
	movl	%eax, %ds
	movl	%edx, %fs
	/* Find out what we need to invalidate. */
	movl	%ss:_C_LABEL(pmap_mbox)+MB_ADDR1, %eax
	movl	%ss:_C_LABEL(pmap_mbox)+MB_ADDR2, %edx
	movl	%ss:_C_LABEL(pmap_mbox)+MB_GLOBAL, %ebx
	movl	$0, %ss:_C_LABEL(local_apic)+LAPIC_EOI
	cmpl	$-1, %eax
	je,pn	3f
1:
	/* Invalidate a single page or a range of pages. */
	invlpg	%ss:(%eax)
	addl	$PAGE_SIZE, %eax
	cmpl	%edx, %eax
	jb	1b
2:
	/* Ack the request, restore state & return. */
	lock
	incl	%ss:_C_LABEL(pmap_mbox)+MB_TAIL
	popl	%fs
	popl	%ds
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%eax
	iret
3:
	testl	%ebx, %ebx
	jz	4f
	/*
	 * We arrive here if asked to invalidate the entire TLB, global
	 * entries included.  Get the emap generation before the flush,
	 * and use it for the update afterwards.  Note that caller-save
	 * registers might be modified, though no registers need to be
	 * preserved for the code at 2b.
	 */
	call	_C_LABEL(uvm_emap_gen_return)
	movl	%eax, %ebx
	movl	%cr4, %eax
	movl	%eax, %edx
	andl	$~CR4_PGE, %edx
	movl	%edx, %cr4
	movl	%eax, %cr4
	pushl	%ebx
	call	_C_LABEL(uvm_emap_update)
	addl	$4, %esp
	jmp	2b
4:
	/*
	 * Get the emap generation number.  Invalidate user TLB entries.
	 * Perform the emap update, passing the generation number.  Note
	 * that caller-save registers might be modified, though no
	 * registers need to be preserved for the code at 2b.
	 */
	call	_C_LABEL(uvm_emap_gen_return)
	movl	%eax, %ebx
	movl	%cr3, %eax
	movl	%eax, %cr3
	pushl	%ebx
	call	_C_LABEL(uvm_emap_update)
	addl	$4, %esp
	jmp	2b
IDTVEC_END(intr_lapic_tlb_bcast)

#if defined(DDB)
IDTVEC(intrddbipi)
1:
	str	%ax
	GET_TSS
	movzwl	(%eax),%eax
	GET_TSS
	pushl	%eax
	movl	$0xff,_C_LABEL(lapic_tpr)
	movl	$0,_C_LABEL(local_apic)+LAPIC_EOI
	sti
	call	_C_LABEL(ddb_ipi_tss)
	addl	$4,%esp
	movl	$0,_C_LABEL(lapic_tpr)
	iret
	jmp	1b
IDTVEC_END(intrddbipi)
#endif /* DDB */
#endif /* MULTIPROCESSOR */

	/*
	 * Interrupt from the local APIC timer.
	 */
IDTVEC(recurse_lapic_ltimer)
	pushfl
	pushl	%cs
	pushl	%esi
	pushl	$0
	pushl	$T_ASTFLT
	INTRENTRY
	jmp	1f
IDTVEC_END(recurse_lapic_ltimer)
IDTVEC(intr_lapic_ltimer)
	pushl	$0
	pushl	$T_ASTFLT
	INTRENTRY
	movl	$0,_C_LABEL(local_apic)+LAPIC_EOI
	movl	CPUVAR(ILEVEL),%ebx
	cmpl	$IPL_CLOCK,%ebx
	jae	2f
IDTVEC(resume_lapic_ltimer)
1:
	pushl	%ebx
	IDEPTH_INCR
	movl	$IPL_CLOCK,CPUVAR(ILEVEL)
	sti
	pushl	$0
	call	_C_LABEL(lapic_clockintr)
	addl	$4,%esp
	cli
	jmp	_C_LABEL(Xdoreti)
2:
	orl	$(1 << LIR_TIMER),CPUVAR(IPENDING)
	INTRFASTEXIT
IDTVEC_END(intr_lapic_ltimer)
#endif /* NLAPIC > 0 */


#define voidop(num)


/*
 * This macro defines the generic stub code.  Its arguments modify it
 * for specific PICs.
 */

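/*
 * Each instantiation generates three entry points:
 *
 *   Xintr_<name><num>	  hardware interrupt entry from the IDT;
 *   Xrecurse_<name><num> entered from spllower() when a pending
 *			  interrupt is unmasked (with a synthetic
 *			  interrupt frame, as for the lapic stubs);
 *   Xresume_<name><num>  entered from Xdoreti() to resume an
 *			  interrupt that was held as pending.
 */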
#define	INTRSTUB(name, num, early_ack, late_ack, mask, unmask, level_mask) \
IDTVEC(recurse_ ## name ## num)						;\
	pushfl								;\
	pushl	%cs							;\
	pushl	%esi							;\
	subl	$4,%esp							;\
	pushl	$T_ASTFLT		/* trap # for doing ASTs */	;\
	INTRENTRY							;\
IDTVEC_END(recurse_ ## name ## num)					;\
IDTVEC(resume_ ## name ## num)						\
	movl	$IREENT_MAGIC,TF_ERR(%esp)				;\
	movl	%ebx,%esi						;\
	movl	CPUVAR(ISOURCES) + (num) * 4, %ebp			;\
	movl	IS_MAXLEVEL(%ebp),%ebx					;\
	jmp	1f							;\
IDTVEC_END(resume_ ## name ## num)					;\
IDTVEC(intr_ ## name ## num)						;\
	pushl	$0			/* dummy error code */		;\
	pushl	$T_ASTFLT		/* trap # for doing ASTs */	;\
	INTRENTRY							;\
	movl	CPUVAR(ISOURCES) + (num) * 4, %ebp			;\
	mask(num)			/* mask it in hardware */	;\
	early_ack(num)			/* and allow other intrs */	;\
	testl	%ebp,%ebp						;\
	jz	9f			/* stray */			;\
	movl	IS_MAXLEVEL(%ebp),%ebx					;\
	movl	CPUVAR(ILEVEL),%esi					;\
	cmpl	%ebx,%esi						;\
	jae	10f			/* currently masked; hold it */	;\
	addl	$1,CPUVAR(NINTR)	/* statistical info */		;\
	adcl	$0,CPUVAR(NINTR)+4					;\
	addl	$1,IS_EVCNTLO(%ebp)	/* inc event counter */		;\
	adcl	$0,IS_EVCNTHI(%ebp)					;\
1:									\
	pushl	%esi							;\
	movl	%ebx,CPUVAR(ILEVEL)					;\
	IDEPTH_INCR							;\
	sti								;\
	movl	IS_HANDLERS(%ebp),%ebx					;\
6:									\
	movl	IH_LEVEL(%ebx),%edi					;\
	cmpl	%esi,%edi						;\
	jle	7f							;\
	pushl	IH_ARG(%ebx)						;\
	movl	IH_FUN(%ebx),%eax					;\
	movl	%edi,CPUVAR(ILEVEL)					;\
	movl	IH_NEXT(%ebx),%ebx	/* next handler in chain */	;\
	call	*%eax			/* call it */			;\
	addl	$4,%esp			/* toss the arg */		;\
	testl	%ebx,%ebx						;\
	jnz	6b							;\
	cli								;\
	unmask(num)			/* unmask it in hardware */	;\
	late_ack(num)							;\
	jmp	_C_LABEL(Xdoreti)	/* lower spl and do ASTs */	;\
7:									\
	cli								;\
	orl	$(1 << num),CPUVAR(IPENDING)				;\
	level_mask(num)							;\
	late_ack(num)							;\
	jmp	_C_LABEL(Xdoreti)	/* lower spl and do ASTs */	;\
10:									\
	orl	$(1 << num),CPUVAR(IPENDING)				;\
	level_mask(num)							;\
	late_ack(num)							;\
	INTRFASTEXIT							;\
9:									\
	pushl	%esp			/* for unmask */		;\
	unmask(num)							;\
	late_ack(num)							;\
	addl	$4,%esp							;\
	INTRFASTEXIT							;\
IDTVEC_END(intr_ ## name ## num)

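/*
 * ICUADDR tells the i8259_asm_* macros which ICU to program:
 * vectors 0-7 go to the master, 8-15 to the slave.
 */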
#define ICUADDR IO_ICU1

INTRSTUB(legacy,0,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,1,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,2,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,3,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,4,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,5,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,6,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,7,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
#undef ICUADDR
#define ICUADDR IO_ICU2

INTRSTUB(legacy,8,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,9,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,10,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,11,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,12,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,13,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,14,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,15,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)

#if NIOAPIC > 0

INTRSTUB(ioapic_edge,0,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,1,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,2,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,3,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,4,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,5,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,6,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,7,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,8,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,9,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,10,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,11,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,12,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,13,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,14,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,15,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,16,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,17,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,18,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,19,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,20,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,21,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,22,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,23,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,24,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,25,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,26,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,27,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,28,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,29,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,30,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic_edge,31,voidop,ioapic_asm_ack,voidop,voidop,voidop)

INTRSTUB(ioapic_level,0,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,1,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,2,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,3,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,4,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,5,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,6,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,7,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,8,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,9,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,10,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,11,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,12,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,13,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,14,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,15,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,16,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,17,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,18,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,19,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,20,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,21,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,22,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,23,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,24,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,25,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,26,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,27,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,28,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,29,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,30,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)
INTRSTUB(ioapic_level,31,voidop,ioapic_asm_ack,voidop,ioapic_unmask,ioapic_mask)

#endif

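/*
 * Entry point tables consumed by the C interrupt code: one
 * { intr, recurse, resume } triple per interrupt source (cf.
 * struct intrstub).
 */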
.globl _C_LABEL(i8259_stubs)
_C_LABEL(i8259_stubs):
	.long _C_LABEL(Xintr_legacy0), _C_LABEL(Xrecurse_legacy0)
	.long _C_LABEL(Xresume_legacy0)
	.long _C_LABEL(Xintr_legacy1), _C_LABEL(Xrecurse_legacy1)
	.long _C_LABEL(Xresume_legacy1)
	.long _C_LABEL(Xintr_legacy2), _C_LABEL(Xrecurse_legacy2)
	.long _C_LABEL(Xresume_legacy2)
	.long _C_LABEL(Xintr_legacy3), _C_LABEL(Xrecurse_legacy3)
	.long _C_LABEL(Xresume_legacy3)
	.long _C_LABEL(Xintr_legacy4), _C_LABEL(Xrecurse_legacy4)
	.long _C_LABEL(Xresume_legacy4)
	.long _C_LABEL(Xintr_legacy5), _C_LABEL(Xrecurse_legacy5)
	.long _C_LABEL(Xresume_legacy5)
	.long _C_LABEL(Xintr_legacy6), _C_LABEL(Xrecurse_legacy6)
	.long _C_LABEL(Xresume_legacy6)
	.long _C_LABEL(Xintr_legacy7), _C_LABEL(Xrecurse_legacy7)
	.long _C_LABEL(Xresume_legacy7)
	.long _C_LABEL(Xintr_legacy8), _C_LABEL(Xrecurse_legacy8)
	.long _C_LABEL(Xresume_legacy8)
	.long _C_LABEL(Xintr_legacy9), _C_LABEL(Xrecurse_legacy9)
	.long _C_LABEL(Xresume_legacy9)
	.long _C_LABEL(Xintr_legacy10), _C_LABEL(Xrecurse_legacy10)
	.long _C_LABEL(Xresume_legacy10)
	.long _C_LABEL(Xintr_legacy11), _C_LABEL(Xrecurse_legacy11)
	.long _C_LABEL(Xresume_legacy11)
	.long _C_LABEL(Xintr_legacy12), _C_LABEL(Xrecurse_legacy12)
	.long _C_LABEL(Xresume_legacy12)
	.long _C_LABEL(Xintr_legacy13), _C_LABEL(Xrecurse_legacy13)
	.long _C_LABEL(Xresume_legacy13)
	.long _C_LABEL(Xintr_legacy14), _C_LABEL(Xrecurse_legacy14)
	.long _C_LABEL(Xresume_legacy14)
	.long _C_LABEL(Xintr_legacy15), _C_LABEL(Xrecurse_legacy15)
	.long _C_LABEL(Xresume_legacy15)

#if NIOAPIC > 0
.globl _C_LABEL(ioapic_edge_stubs)
_C_LABEL(ioapic_edge_stubs):
	.long _C_LABEL(Xintr_ioapic_edge0), _C_LABEL(Xrecurse_ioapic_edge0)
	.long _C_LABEL(Xresume_ioapic_edge0)
	.long _C_LABEL(Xintr_ioapic_edge1), _C_LABEL(Xrecurse_ioapic_edge1)
	.long _C_LABEL(Xresume_ioapic_edge1)
	.long _C_LABEL(Xintr_ioapic_edge2), _C_LABEL(Xrecurse_ioapic_edge2)
	.long _C_LABEL(Xresume_ioapic_edge2)
	.long _C_LABEL(Xintr_ioapic_edge3), _C_LABEL(Xrecurse_ioapic_edge3)
	.long _C_LABEL(Xresume_ioapic_edge3)
	.long _C_LABEL(Xintr_ioapic_edge4), _C_LABEL(Xrecurse_ioapic_edge4)
	.long _C_LABEL(Xresume_ioapic_edge4)
	.long _C_LABEL(Xintr_ioapic_edge5), _C_LABEL(Xrecurse_ioapic_edge5)
	.long _C_LABEL(Xresume_ioapic_edge5)
	.long _C_LABEL(Xintr_ioapic_edge6), _C_LABEL(Xrecurse_ioapic_edge6)
	.long _C_LABEL(Xresume_ioapic_edge6)
	.long _C_LABEL(Xintr_ioapic_edge7), _C_LABEL(Xrecurse_ioapic_edge7)
	.long _C_LABEL(Xresume_ioapic_edge7)
	.long _C_LABEL(Xintr_ioapic_edge8), _C_LABEL(Xrecurse_ioapic_edge8)
	.long _C_LABEL(Xresume_ioapic_edge8)
	.long _C_LABEL(Xintr_ioapic_edge9), _C_LABEL(Xrecurse_ioapic_edge9)
	.long _C_LABEL(Xresume_ioapic_edge9)
	.long _C_LABEL(Xintr_ioapic_edge10), _C_LABEL(Xrecurse_ioapic_edge10)
	.long _C_LABEL(Xresume_ioapic_edge10)
	.long _C_LABEL(Xintr_ioapic_edge11), _C_LABEL(Xrecurse_ioapic_edge11)
	.long _C_LABEL(Xresume_ioapic_edge11)
	.long _C_LABEL(Xintr_ioapic_edge12), _C_LABEL(Xrecurse_ioapic_edge12)
	.long _C_LABEL(Xresume_ioapic_edge12)
	.long _C_LABEL(Xintr_ioapic_edge13), _C_LABEL(Xrecurse_ioapic_edge13)
	.long _C_LABEL(Xresume_ioapic_edge13)
	.long _C_LABEL(Xintr_ioapic_edge14), _C_LABEL(Xrecurse_ioapic_edge14)
	.long _C_LABEL(Xresume_ioapic_edge14)
	.long _C_LABEL(Xintr_ioapic_edge15), _C_LABEL(Xrecurse_ioapic_edge15)
	.long _C_LABEL(Xresume_ioapic_edge15)
	.long _C_LABEL(Xintr_ioapic_edge16), _C_LABEL(Xrecurse_ioapic_edge16)
	.long _C_LABEL(Xresume_ioapic_edge16)
	.long _C_LABEL(Xintr_ioapic_edge17), _C_LABEL(Xrecurse_ioapic_edge17)
	.long _C_LABEL(Xresume_ioapic_edge17)
	.long _C_LABEL(Xintr_ioapic_edge18), _C_LABEL(Xrecurse_ioapic_edge18)
	.long _C_LABEL(Xresume_ioapic_edge18)
	.long _C_LABEL(Xintr_ioapic_edge19), _C_LABEL(Xrecurse_ioapic_edge19)
	.long _C_LABEL(Xresume_ioapic_edge19)
	.long _C_LABEL(Xintr_ioapic_edge20), _C_LABEL(Xrecurse_ioapic_edge20)
	.long _C_LABEL(Xresume_ioapic_edge20)
	.long _C_LABEL(Xintr_ioapic_edge21), _C_LABEL(Xrecurse_ioapic_edge21)
	.long _C_LABEL(Xresume_ioapic_edge21)
	.long _C_LABEL(Xintr_ioapic_edge22), _C_LABEL(Xrecurse_ioapic_edge22)
	.long _C_LABEL(Xresume_ioapic_edge22)
	.long _C_LABEL(Xintr_ioapic_edge23), _C_LABEL(Xrecurse_ioapic_edge23)
	.long _C_LABEL(Xresume_ioapic_edge23)
	.long _C_LABEL(Xintr_ioapic_edge24), _C_LABEL(Xrecurse_ioapic_edge24)
	.long _C_LABEL(Xresume_ioapic_edge24)
	.long _C_LABEL(Xintr_ioapic_edge25), _C_LABEL(Xrecurse_ioapic_edge25)
	.long _C_LABEL(Xresume_ioapic_edge25)
	.long _C_LABEL(Xintr_ioapic_edge26), _C_LABEL(Xrecurse_ioapic_edge26)
	.long _C_LABEL(Xresume_ioapic_edge26)
	.long _C_LABEL(Xintr_ioapic_edge27), _C_LABEL(Xrecurse_ioapic_edge27)
	.long _C_LABEL(Xresume_ioapic_edge27)
	.long _C_LABEL(Xintr_ioapic_edge28), _C_LABEL(Xrecurse_ioapic_edge28)
	.long _C_LABEL(Xresume_ioapic_edge28)
	.long _C_LABEL(Xintr_ioapic_edge29), _C_LABEL(Xrecurse_ioapic_edge29)
	.long _C_LABEL(Xresume_ioapic_edge29)
	.long _C_LABEL(Xintr_ioapic_edge30), _C_LABEL(Xrecurse_ioapic_edge30)
	.long _C_LABEL(Xresume_ioapic_edge30)
	.long _C_LABEL(Xintr_ioapic_edge31), _C_LABEL(Xrecurse_ioapic_edge31)
	.long _C_LABEL(Xresume_ioapic_edge31)

.globl _C_LABEL(ioapic_level_stubs)
_C_LABEL(ioapic_level_stubs):
	.long _C_LABEL(Xintr_ioapic_level0), _C_LABEL(Xrecurse_ioapic_level0)
	.long _C_LABEL(Xresume_ioapic_level0)
	.long _C_LABEL(Xintr_ioapic_level1), _C_LABEL(Xrecurse_ioapic_level1)
	.long _C_LABEL(Xresume_ioapic_level1)
	.long _C_LABEL(Xintr_ioapic_level2), _C_LABEL(Xrecurse_ioapic_level2)
	.long _C_LABEL(Xresume_ioapic_level2)
	.long _C_LABEL(Xintr_ioapic_level3), _C_LABEL(Xrecurse_ioapic_level3)
	.long _C_LABEL(Xresume_ioapic_level3)
	.long _C_LABEL(Xintr_ioapic_level4), _C_LABEL(Xrecurse_ioapic_level4)
	.long _C_LABEL(Xresume_ioapic_level4)
	.long _C_LABEL(Xintr_ioapic_level5), _C_LABEL(Xrecurse_ioapic_level5)
	.long _C_LABEL(Xresume_ioapic_level5)
	.long _C_LABEL(Xintr_ioapic_level6), _C_LABEL(Xrecurse_ioapic_level6)
	.long _C_LABEL(Xresume_ioapic_level6)
	.long _C_LABEL(Xintr_ioapic_level7), _C_LABEL(Xrecurse_ioapic_level7)
	.long _C_LABEL(Xresume_ioapic_level7)
	.long _C_LABEL(Xintr_ioapic_level8), _C_LABEL(Xrecurse_ioapic_level8)
	.long _C_LABEL(Xresume_ioapic_level8)
	.long _C_LABEL(Xintr_ioapic_level9), _C_LABEL(Xrecurse_ioapic_level9)
	.long _C_LABEL(Xresume_ioapic_level9)
	.long _C_LABEL(Xintr_ioapic_level10), _C_LABEL(Xrecurse_ioapic_level10)
	.long _C_LABEL(Xresume_ioapic_level10)
	.long _C_LABEL(Xintr_ioapic_level11), _C_LABEL(Xrecurse_ioapic_level11)
	.long _C_LABEL(Xresume_ioapic_level11)
	.long _C_LABEL(Xintr_ioapic_level12), _C_LABEL(Xrecurse_ioapic_level12)
	.long _C_LABEL(Xresume_ioapic_level12)
	.long _C_LABEL(Xintr_ioapic_level13), _C_LABEL(Xrecurse_ioapic_level13)
	.long _C_LABEL(Xresume_ioapic_level13)
	.long _C_LABEL(Xintr_ioapic_level14), _C_LABEL(Xrecurse_ioapic_level14)
	.long _C_LABEL(Xresume_ioapic_level14)
	.long _C_LABEL(Xintr_ioapic_level15), _C_LABEL(Xrecurse_ioapic_level15)
	.long _C_LABEL(Xresume_ioapic_level15)
	.long _C_LABEL(Xintr_ioapic_level16), _C_LABEL(Xrecurse_ioapic_level16)
	.long _C_LABEL(Xresume_ioapic_level16)
	.long _C_LABEL(Xintr_ioapic_level17), _C_LABEL(Xrecurse_ioapic_level17)
	.long _C_LABEL(Xresume_ioapic_level17)
	.long _C_LABEL(Xintr_ioapic_level18), _C_LABEL(Xrecurse_ioapic_level18)
	.long _C_LABEL(Xresume_ioapic_level18)
	.long _C_LABEL(Xintr_ioapic_level19), _C_LABEL(Xrecurse_ioapic_level19)
	.long _C_LABEL(Xresume_ioapic_level19)
	.long _C_LABEL(Xintr_ioapic_level20), _C_LABEL(Xrecurse_ioapic_level20)
	.long _C_LABEL(Xresume_ioapic_level20)
	.long _C_LABEL(Xintr_ioapic_level21), _C_LABEL(Xrecurse_ioapic_level21)
	.long _C_LABEL(Xresume_ioapic_level21)
	.long _C_LABEL(Xintr_ioapic_level22), _C_LABEL(Xrecurse_ioapic_level22)
	.long _C_LABEL(Xresume_ioapic_level22)
	.long _C_LABEL(Xintr_ioapic_level23), _C_LABEL(Xrecurse_ioapic_level23)
	.long _C_LABEL(Xresume_ioapic_level23)
	.long _C_LABEL(Xintr_ioapic_level24), _C_LABEL(Xrecurse_ioapic_level24)
	.long _C_LABEL(Xresume_ioapic_level24)
	.long _C_LABEL(Xintr_ioapic_level25), _C_LABEL(Xrecurse_ioapic_level25)
	.long _C_LABEL(Xresume_ioapic_level25)
	.long _C_LABEL(Xintr_ioapic_level26), _C_LABEL(Xrecurse_ioapic_level26)
	.long _C_LABEL(Xresume_ioapic_level26)
	.long _C_LABEL(Xintr_ioapic_level27), _C_LABEL(Xrecurse_ioapic_level27)
	.long _C_LABEL(Xresume_ioapic_level27)
	.long _C_LABEL(Xintr_ioapic_level28), _C_LABEL(Xrecurse_ioapic_level28)
	.long _C_LABEL(Xresume_ioapic_level28)
	.long _C_LABEL(Xintr_ioapic_level29), _C_LABEL(Xrecurse_ioapic_level29)
	.long _C_LABEL(Xresume_ioapic_level29)
	.long _C_LABEL(Xintr_ioapic_level30), _C_LABEL(Xrecurse_ioapic_level30)
	.long _C_LABEL(Xresume_ioapic_level30)
	.long _C_LABEL(Xintr_ioapic_level31), _C_LABEL(Xrecurse_ioapic_level31)
	.long _C_LABEL(Xresume_ioapic_level31)
#endif
#else /* XEN */
#define voidop(num)

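/*
 * Xen event channel stubs.  The recurse entry builds a trap frame
 * and loads %esi with Xdoreti; the resume entry then runs the whole
 * handler chain at IPL `num' and returns through *%esi.
 */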
#define	XENINTRSTUB(name, num, early_ack, late_ack, mask, unmask, level_mask) \
IDTVEC(recurse_ ## name ## num)						;\
	pushfl								;\
	pushl	%cs							;\
	pushl	%esi							;\
	subl	$4,%esp							;\
	pushl	$T_ASTFLT		/* trap # for doing ASTs */	;\
	INTRENTRY							;\
	movl	$_C_LABEL(Xdoreti), %esi; /* we now have a trap frame, so loop using doreti instead */ ;\
IDTVEC(resume_ ## name ## num)						\
	movl	$IREENT_MAGIC,TF_ERR(%esp)				;\
	pushl	%ebx							;\
	movl	CPUVAR(ISOURCES) + (num) * 4, %ebp			;\
	movl	$num,CPUVAR(ILEVEL)					;\
	IDEPTH_INCR /* leaves old %esp on stack	*/			;\
	STI(%eax)							;\
	movl	IS_HANDLERS(%ebp),%ebx					;\
6:									\
	pushl	IH_ARG(%ebx)						;\
	call	*IH_FUN(%ebx)		/* call it */			;\
	addl	$4,%esp			/* toss the arg */		;\
	movl	IH_IPL_NEXT(%ebx),%ebx	/* next handler in chain */	;\
	testl	%ebx,%ebx						;\
	jnz	6b							;\
									\
	CLI(%eax)							;\
	unmask(num)			/* unmask it in hardware */	;\
	late_ack(num)							;\
	IDEPTH_DECR							;\
	popl	%ebx							;\
	jmp	*%esi			/* lower spl and do ASTs */	;\

/*
 * Just unmasking the event isn't enough; we also need to
 * reassert the event pending bit if needed.  For now just call
 * the C function that does it; maybe rewrite it in inline
 * assembly later?
 */
#define hypervisor_asm_unmask(num)			\
	pushl	$num					;\
	call	_C_LABEL(hypervisor_enable_ipl)		;\
	addl	$4,%esp
XENINTRSTUB(xenev,0,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,1,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,2,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,3,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,4,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,5,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,6,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,7,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,8,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,9,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,10,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,11,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,12,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,13,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,14,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,15,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,16,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,17,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,18,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,19,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,20,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,21,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,22,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,23,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,24,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,25,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,26,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,27,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,28,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,29,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,30,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
XENINTRSTUB(xenev,31,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)

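/*
 * Recurse/resume entry points, one pair per event channel, consumed
 * by the Xen event channel code.
 */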
.globl _C_LABEL(xenev_stubs)
_C_LABEL(xenev_stubs):
	.long _C_LABEL(Xrecurse_xenev0), _C_LABEL(Xresume_xenev0)
	.long _C_LABEL(Xrecurse_xenev1), _C_LABEL(Xresume_xenev1)
	.long _C_LABEL(Xrecurse_xenev2), _C_LABEL(Xresume_xenev2)
	.long _C_LABEL(Xrecurse_xenev3), _C_LABEL(Xresume_xenev3)
	.long _C_LABEL(Xrecurse_xenev4), _C_LABEL(Xresume_xenev4)
	.long _C_LABEL(Xrecurse_xenev5), _C_LABEL(Xresume_xenev5)
	.long _C_LABEL(Xrecurse_xenev6), _C_LABEL(Xresume_xenev6)
	.long _C_LABEL(Xrecurse_xenev7), _C_LABEL(Xresume_xenev7)
	.long _C_LABEL(Xrecurse_xenev8), _C_LABEL(Xresume_xenev8)
	.long _C_LABEL(Xrecurse_xenev9), _C_LABEL(Xresume_xenev9)
	.long _C_LABEL(Xrecurse_xenev10), _C_LABEL(Xresume_xenev10)
	.long _C_LABEL(Xrecurse_xenev11), _C_LABEL(Xresume_xenev11)
	.long _C_LABEL(Xrecurse_xenev12), _C_LABEL(Xresume_xenev12)
	.long _C_LABEL(Xrecurse_xenev13), _C_LABEL(Xresume_xenev13)
	.long _C_LABEL(Xrecurse_xenev14), _C_LABEL(Xresume_xenev14)
	.long _C_LABEL(Xrecurse_xenev15), _C_LABEL(Xresume_xenev15)
	.long _C_LABEL(Xrecurse_xenev16), _C_LABEL(Xresume_xenev16)
	.long _C_LABEL(Xrecurse_xenev17), _C_LABEL(Xresume_xenev17)
	.long _C_LABEL(Xrecurse_xenev18), _C_LABEL(Xresume_xenev18)
	.long _C_LABEL(Xrecurse_xenev19), _C_LABEL(Xresume_xenev19)
	.long _C_LABEL(Xrecurse_xenev20), _C_LABEL(Xresume_xenev20)
	.long _C_LABEL(Xrecurse_xenev21), _C_LABEL(Xresume_xenev21)
	.long _C_LABEL(Xrecurse_xenev22), _C_LABEL(Xresume_xenev22)
	.long _C_LABEL(Xrecurse_xenev23), _C_LABEL(Xresume_xenev23)
	.long _C_LABEL(Xrecurse_xenev24), _C_LABEL(Xresume_xenev24)
	.long _C_LABEL(Xrecurse_xenev25), _C_LABEL(Xresume_xenev25)
	.long _C_LABEL(Xrecurse_xenev26), _C_LABEL(Xresume_xenev26)
	.long _C_LABEL(Xrecurse_xenev27), _C_LABEL(Xresume_xenev27)
	.long _C_LABEL(Xrecurse_xenev28), _C_LABEL(Xresume_xenev28)
	.long _C_LABEL(Xrecurse_xenev29), _C_LABEL(Xresume_xenev29)
	.long _C_LABEL(Xrecurse_xenev30), _C_LABEL(Xresume_xenev30)
	.long _C_LABEL(Xrecurse_xenev31), _C_LABEL(Xresume_xenev31)

#endif /* XEN */

/*
 * Trap and fault vector routines
 *
 * On exit from the kernel to user mode, we always need to check for ASTs.  In
 * addition, we need to do this atomically; otherwise an interrupt may occur
 * which causes an AST, but it won't get processed until the next kernel entry
 * (possibly the next clock tick).  Thus, we disable interrupts before
 * checking, and only enable them again on the final `iret' or before calling
 * the AST handler.
 */

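/*
 * TRAP() is used for vectors where the CPU pushes an error code
 * itself; ZTRAP() pushes a zero error code first for those where
 * it does not.
 */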
#define TRAP(a)			pushl $(a) ; jmp _C_LABEL(alltraps)
#define ZTRAP(a)		pushl $0 ; TRAP(a)

#ifdef IPKDB
#define BPTTRAP(a)	pushl $0; pushl $(a); jmp _C_LABEL(bpttraps)
#else
#define BPTTRAP(a)	ZTRAP(a)
#endif


	.text
IDTVEC(trap00)
	ZTRAP(T_DIVIDE)
IDTVEC(trap01)
	BPTTRAP(T_TRCTRAP)
IDTVEC(trap02)
	pushl	$0
	pushl	$(T_NMI)
	INTRENTRY
	jmp	_C_LABEL(calltrap)
IDTVEC(trap03)
	BPTTRAP(T_BPTFLT)
IDTVEC(trap04)
	ZTRAP(T_OFLOW)
IDTVEC(trap05)
	ZTRAP(T_BOUND)
/*
 * Privileged instruction fault.
 */
#ifdef KDTRACE_HOOKS
	SUPERALIGN_TEXT
IDTVEC(trap06)
	/* Check if there is no DTrace hook registered. */
	cmpl	$0,dtrace_invop_jump_addr
	je	norm_ill

	/* Check if this is a user fault. */
	/* XXX this was 0x0020 in FreeBSD */
	cmpl	$GSEL(GCODE_SEL, SEL_KPL), 4(%esp)   /* Check code segment. */

	/* If so, just handle it as a normal trap. */
	jne	norm_ill

	/*
	 * This is a kernel instruction fault that might have been caused
	 * by a DTrace provider.
	 */
	pushal				/* Push all registers onto the stack. */

	/*
	 * Set our jump address for the jump back in the event that
	 * the exception wasn't caused by DTrace at all.
	 */
	movl	$norm_ill, dtrace_invop_calltrap_addr

	/* Jump to the code hooked in by DTrace. */
	jmpl	*dtrace_invop_jump_addr

	/*
	 * Process the instruction fault in the normal way.
	 */
norm_ill:
	ZTRAP(T_PRIVINFLT)
#else
IDTVEC(trap06)
	ZTRAP(T_PRIVINFLT)
#endif
IDTVEC(trap07)
#if NNPX > 0
	pushl	$0			# dummy error code
	pushl	$T_DNA
	INTRENTRY
#ifdef DIAGNOSTIC
	movl	CPUVAR(ILEVEL),%ebx
#endif
	pushl	CPUVAR(SELF)
	call	*_C_LABEL(npxdna_func)
	addl	$4,%esp
	testl	%eax,%eax
	jz	calltrap
	jmp	_C_LABEL(trapreturn)
#else
#ifndef XEN
	sti
#endif
	ZTRAP(T_DNA)
#endif
IDTVEC(trap08)
	TRAP(T_DOUBLEFLT)
IDTVEC(trap09)
	ZTRAP(T_FPOPFLT)
IDTVEC(trap0a)
	TRAP(T_TSSFLT)
IDTVEC(trap0b)
	TRAP(T_SEGNPFLT)
IDTVEC(trap0c)
	TRAP(T_STKFLT)
IDTVEC(trap0d)
	TRAP(T_PROTFLT)
#ifndef XEN
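/*
 * Page fault.  A kernel fault on a read of IDT entry 6 means the
 * F00F-bug workaround fired (the IDT is mapped read-only on affected
 * Pentiums), so convert it back into a privileged-instruction fault.
 */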
IDTVEC(trap0e)
	pushl	$T_PAGEFLT
	INTRENTRY
	STI(%eax)
	testb	$PGEX_U,TF_ERR(%esp)
	jnz	calltrap
	movl	%cr2,%eax
	subl	_C_LABEL(pentium_idt),%eax
	cmpl	$(6*8),%eax
	jne	calltrap
	movb	$T_PRIVINFLT,TF_TRAPNO(%esp)
	jmp	calltrap
#else /* !XEN */
IDTVEC(trap0e)
	TRAP(T_PAGEFLT)
#endif /* !XEN */

IDTVEC(intrspurious)
IDTVEC(trap0f)
	/*
	 * The Pentium Pro local APIC may erroneously call this vector for a
	 * default IR7.  Just ignore it.
	 *
	 * (The local APIC does this when CPL is raised while it's on the
	 * way to delivering an interrupt... presumably enough has been set
	 * up that it's inconvenient to abort delivery completely...)
	 */
	pushl	$0			# dummy error code
	pushl	$T_ASTFLT
	INTRENTRY
	STI(%eax)
#ifdef DIAGNOSTIC
	movl	CPUVAR(ILEVEL),%ebx
#endif
	jmp	_C_LABEL(trapreturn)

IDTVEC(trap10)
#if NNPX > 0
	/*
	 * Handle like an interrupt so that we can call npxintr to clear the
	 * error.  It would be better to handle npx interrupts as traps but
	 * this is difficult for nested interrupts.
	 */
	pushl	$0			# dummy error code
	pushl	$T_ASTFLT
	INTRENTRY
	movl	CPUVAR(ILEVEL),%ebx
	pushl	%ebx
	pushl	%esp
	pushl	$0			# dummy arg
	addl	$1,CPUVAR(NTRAP)	# statistical info
	adcl	$0,CPUVAR(NTRAP)+4
	call	_C_LABEL(npxintr)
	addl	$12,%esp
	jmp	_C_LABEL(trapreturn)
#else
	sti
	ZTRAP(T_ARITHTRAP)
#endif
IDTVEC(trap11)
	TRAP(T_ALIGNFLT)
#ifdef XEN
IDTVEC(trap12)
IDTVEC(trap13)
#else
IDTVEC(trap12)
	ZTRAP(T_MCA)
IDTVEC(trap13)
	ZTRAP(T_XMM)
#endif
IDTVEC(trap14)
IDTVEC(trap15)
IDTVEC(trap16)
IDTVEC(trap17)
IDTVEC(trap18)
IDTVEC(trap19)
IDTVEC(trap1a)
IDTVEC(trap1b)
IDTVEC(trap1c)
IDTVEC(trap1d)
IDTVEC(trap1e)
IDTVEC(trap1f)
	/* 20 - 31 reserved for future expansion */
	ZTRAP(T_RESERVED)

IDTVEC(exceptions)
	.long	_C_LABEL(Xtrap00), _C_LABEL(Xtrap01)
	.long	_C_LABEL(Xtrap02), _C_LABEL(Xtrap03)
	.long	_C_LABEL(Xtrap04), _C_LABEL(Xtrap05)
	.long	_C_LABEL(Xtrap06), _C_LABEL(Xtrap07)
	.long	_C_LABEL(Xtrap08), _C_LABEL(Xtrap09)
	.long	_C_LABEL(Xtrap0a), _C_LABEL(Xtrap0b)
	.long	_C_LABEL(Xtrap0c), _C_LABEL(Xtrap0d)
	.long	_C_LABEL(Xtrap0e), _C_LABEL(Xtrap0f)
	.long	_C_LABEL(Xtrap10), _C_LABEL(Xtrap11)
	.long	_C_LABEL(Xtrap12), _C_LABEL(Xtrap13)
	.long	_C_LABEL(Xtrap14), _C_LABEL(Xtrap15)
	.long	_C_LABEL(Xtrap16), _C_LABEL(Xtrap17)
	.long	_C_LABEL(Xtrap18), _C_LABEL(Xtrap19)
	.long	_C_LABEL(Xtrap1a), _C_LABEL(Xtrap1b)
	.long	_C_LABEL(Xtrap1c), _C_LABEL(Xtrap1d)
	.long	_C_LABEL(Xtrap1e), _C_LABEL(Xtrap1f)


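/*
 * Double faults arrive here via a task gate.  Recover the previous
 * task's TSS (its selector is the first word of the current TSS)
 * and hand it to trap_tss().
 */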
IDTVEC(tss_trap08)
1:
	str	%ax
	GET_TSS
	movzwl	(%eax),%eax
	GET_TSS
	pushl	$T_DOUBLEFLT
	pushl	%eax
	call	_C_LABEL(trap_tss)
	addl	$12,%esp
	iret
	jmp	1b

/*
 * trap() calls here when it detects a fault in INTRFASTEXIT (loading the
 * segment registers or during the iret itself).
 * The address of the (possibly reconstructed) user trap frame is
 * passed as an argument.
 * Typically the code will have raised a SIGSEGV which will be actioned
 * by the code below.
 */
_C_LABEL(trap_return_fault_return):	.globl	trap_return_fault_return
	mov	4(%esp),%esp	/* frame for user return */
	jmp	_C_LABEL(trapreturn)

/* LINTSTUB: Ignore */
NENTRY(alltraps)
	INTRENTRY
	STI(%eax)
calltrap:
#ifdef DIAGNOSTIC
	movl	CPUVAR(ILEVEL),%ebx
#endif /* DIAGNOSTIC */
	addl	$1,CPUVAR(NTRAP)	# statistical info
	adcl	$0,CPUVAR(NTRAP)+4
	pushl	%esp
	call	_C_LABEL(trap)
	addl	$4,%esp
_C_LABEL(trapreturn):	.globl	trapreturn
	testb	$CHK_UPL,TF_CS(%esp)
	jnz	.Lalltraps_checkast
#ifdef VM86
	testl	$PSL_VM,TF_EFLAGS(%esp)
	jz	6f
#else
	jmp	6f
#endif
.Lalltraps_checkast:
	/* Check for ASTs on exit to user mode. */
	CLI(%eax)
	CHECK_ASTPENDING(%eax)
	jz	3f
5:	CLEAR_ASTPENDING(%eax)
	STI(%eax)
	movl	$T_ASTFLT,TF_TRAPNO(%esp)
	addl	$1,CPUVAR(NTRAP)	# statistical info
	adcl	$0,CPUVAR(NTRAP)+4
	pushl	%esp
	call	_C_LABEL(trap)
	addl	$4,%esp
	jmp	.Lalltraps_checkast	/* re-check ASTs */
3:	CHECK_DEFERRED_SWITCH
	jnz	9f
#ifdef XEN
	STIC(%eax)
	jz	6f
	call	_C_LABEL(stipending)
	testl	%eax,%eax
	jz	6f
	/* process pending interrupts */
	CLI(%eax)
	movl	CPUVAR(ILEVEL),%ebx
	movl	$.Lalltraps_resume,%esi	# address to resume loop at
.Lalltraps_resume:
	movl	%ebx,%eax		# get cpl
	movl	CPUVAR(IUNMASK)(,%eax,4),%eax
	andl	CPUVAR(IPENDING),%eax	# any non-masked bits left?
	jz	7f
	bsrl	%eax,%eax
	btrl	%eax,CPUVAR(IPENDING)
	movl	CPUVAR(ISOURCES)(,%eax,4),%eax
	jmp	*IS_RESUME(%eax)
7:	movl	%ebx,CPUVAR(ILEVEL)	# restore cpl
	jmp	_C_LABEL(trapreturn)
#endif /* XEN */
#ifndef DIAGNOSTIC
6:	INTRFASTEXIT
#else
6:	cmpl	CPUVAR(ILEVEL),%ebx
	jne	3f
	INTRFASTEXIT
3:	STI(%eax)
	pushl	$4f
	call	_C_LABEL(panic)
	addl	$4,%esp
	pushl	%ebx
	call	_C_LABEL(spllower)
	addl	$4,%esp
	jmp	.Lalltraps_checkast	/* re-check ASTs */
4:	.asciz	"SPL NOT LOWERED ON TRAP EXIT\n"
#endif /* DIAGNOSTIC */
9:	STI(%eax)
	call	_C_LABEL(pmap_load)
	jmp	.Lalltraps_checkast	/* re-check ASTs */

#ifdef IPKDB
/* LINTSTUB: Ignore */
NENTRY(bpttraps)
	INTRENTRY
	call	_C_LABEL(ipkdb_trap_glue)
	testl	%eax,%eax
	jz	calltrap
	INTRFASTEXIT

ipkdbsetup:
	popl	%ecx

	/* Disable write protection: */
	movl	%cr0,%eax
	pushl	%eax
	andl	$~CR0_WP,%eax
	movl	%eax,%cr0

	/* Substitute Protection & Page Fault handlers: */
	movl	_C_LABEL(idt),%edx
	pushl	13*8(%edx)
	pushl	13*8+4(%edx)
	pushl	14*8(%edx)
	pushl	14*8+4(%edx)
	movl	$fault,%eax
	movw	%ax,13*8(%edx)
	movw	%ax,14*8(%edx)
	shrl	$16,%eax
	movw	%ax,13*8+6(%edx)
	movw	%ax,14*8+6(%edx)

	pushl	%ecx
	ret

ipkdbrestore:
	popl	%ecx

	/* Restore Protection & Page Fault handlers: */
	movl	_C_LABEL(idt),%edx
	popl	14*8+4(%edx)
	popl	14*8(%edx)
	popl	13*8+4(%edx)
	popl	13*8(%edx)

	/* Restore write protection: */
	popl	%edx
	movl	%edx,%cr0

	pushl	%ecx
	ret
#endif /* IPKDB */

#ifdef IPKDB
/* LINTSTUB: Func: int ipkdbfbyte(u_char *c) */
NENTRY(ipkdbfbyte)
	pushl	%ebp
	movl	%esp,%ebp
	call	ipkdbsetup
	movl	8(%ebp),%edx
	movzbl	(%edx),%eax
faultexit:
	call	ipkdbrestore
	popl	%ebp
	ret

/* LINTSTUB: Func: int ipkdbsbyte(u_char *c, int i) */
NENTRY(ipkdbsbyte)
	pushl	%ebp
	movl	%esp,%ebp
	call	ipkdbsetup
	movl	8(%ebp),%edx
	movl	12(%ebp),%eax
	movb	%al,(%edx)
	call	ipkdbrestore
	popl	%ebp
	ret

fault:
	popl	%eax		/* error code */
	movl	$faultexit,%eax
	movl	%eax,(%esp)
	movl	$-1,%eax
	iret
#endif	/* IPKDB */

#ifdef XEN

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
NENTRY(hypervisor_callback)
	pushl	$0			# dummy error code
	pushl	$T_ASTFLT
	INTRENTRY
	movl	TF_EIP(%esp),%eax
	cmpl	$scrit,%eax
	jb	11f
	cmpl	$ecrit,%eax
	jb	critical_region_fixup
11:	pushl	CPUVAR(ILEVEL)
	push	%esp
	call	do_hypervisor_callback
	add	$8,%esp
	xorl	%eax,%eax
	movb	TF_CS(%esp),%cl
	test	$CHK_UPL,%cl		# slow return to ring 2 or 3
	je	safesti
	movl	CPUVAR(ILEVEL),%ebx
	jmp	doreti_checkast
safesti:
	movl	CPUVAR(VCPU),%esi
	XEN_UNBLOCK_EVENTS(%esi)	# reenable event callbacks
scrit:	/**** START OF CRITICAL REGION ****/
	XEN_TEST_PENDING(%esi)
	jnz	14f			# process more events if necessary...
	INTRFASTEXIT
critiret:
14:	XEN_BLOCK_EVENTS(%esi)
	jmp	11b
ecrit:	/**** END OF CRITICAL REGION ****/
/*
 * [How we do the fixup].  We want to merge the current stack frame with
 * the just-interrupted frame.  How we do this depends on where in the
 * critical region the interrupted handler was executing, and so how many
 * saved registers have already been popped off the interrupted stack
 * frame.  Here the only point that matters is the final iret (critiret),
 * where the whole frame has been popped; everywhere else in the region
 * nothing has been popped yet, so the code below only distinguishes
 * those two cases.
 */
critical_region_fixup:
	cmpl	$(critiret-1),%eax	# eip points to iret?
	jne	1f
	movl	$(TF_PUSHSIZE+0x8),%eax
	jmp	2f
1:	xorl	%eax,%eax
2:
	/* %eax contains num bytes popped */
	mov	%esp,%esi
	add	%eax,%esi		# %esi points at end of src region
	mov	%esp,%edi
	add	$(TF_PUSHSIZE+0x8+0xC),%edi # %edi points at end of dst region
	mov	%eax,%ecx
	shr	$2,%ecx			# convert bytes to words
	je	16f			# skip loop if nothing to copy
15:	subl	$4,%esi			# pre-decrementing copy loop
	subl	$4,%edi
	movl	(%esi),%eax
	movl	%eax,(%edi)
	loop	15b
16:	movl	%edi,%esp		# final %edi is top of merged stack
	jmp	11b


/*
 * Hypervisor uses this for application faults while it executes.
 */
NENTRY(failsafe_callback)
	pop	%ds
	pop	%es
	pop	%fs
	pop	%gs
	call	_C_LABEL(xen_failsafe_handler)
	iret

#endif /* XEN */
