/*	$NetBSD: vector.S,v 1.4 2002/12/04 23:13:09 fvdl Exp $	*/

/*
 * Copyright 2002 (c) Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_ipkdb.h"

#include <machine/i8259.h>
#include <machine/i82093reg.h>
#include <machine/i82489reg.h>
#include <machine/asm.h>
#include <machine/frameasm.h>
#include <machine/segments.h>
#include <machine/trap.h>
#include <machine/intr.h>
#include <machine/psl.h>

#include <net/netisr.h>

#include "ioapic.h"
#include "lapic.h"

#include "npx.h"
#include "assym.h"

#define __HAVE_GENERIC_SOFT_INTERRUPTS	/* XXX */


/*
 * Macros for interrupt entry, call to handler, and exit.
 *
 * XXX
 * The interrupt frame is set up to look like a trap frame.  This may be a
 * waste.  The only handler which needs a frame is the clock handler, and it
 * only needs a few bits.  Xdoreti() needs a trap frame for handling ASTs, but
 * it could easily convert the frame on demand.
 *
 * The direct costs of setting up a trap frame are two pushl's (error code and
 * trap number), an addl to get rid of these, and pushing and popping the
 * callee-saved registers %esi, %edi, %ebx, and %ebp twice.
 *
 * If the interrupt frame is made more flexible, INTR can push %eax first and
 * decide the ipending case with less overhead, e.g., by avoiding loading the
 * segment registers.
 *
 */
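/*
 * For orientation, the frame the stubs below build is roughly the trapframe
 * that the TF_* offsets elsewhere in this file refer to.  This is a sketch
 * only (the authoritative layout lives in <machine/frame.h>; the exact field
 * order here is an assumption):
 *
 *	struct trapframe {
 *		int	tf_gs, tf_fs, tf_es, tf_ds;	   saved by INTRENTRY
 *		int	tf_edi, tf_esi, tf_ebp, tf_ebx;	   saved by INTRENTRY
 *		int	tf_edx, tf_ecx, tf_eax;		   saved by INTRENTRY
 *		int	tf_trapno, tf_err;		   pushed by the stub
 *		int	tf_eip, tf_cs, tf_eflags;	   pushed by the CPU
 *		int	tf_esp, tf_ss;			   only on a ring crossing
 *	};
 */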

#define MY_COUNT _C_LABEL(uvmexp)

/* XXX See comment in locore.s */
#ifdef __ELF__
#define	XINTR(name,num)		Xintr_/**/name/**/num
#define	XSTRAY(name,num)	Xstray_/**/name/**/num
#define XINTR_TSS(irq_num)	Xintr_tss_ ## irq_num
#else
#define	XINTR(name,num)		_Xintr_/**/name/**/num
#define	XSTRAY(name,num)	_Xstray_/**/name/**/num
#define XINTR_TSS(irq_num)	Xintr_tss_/**/irq_num
#endif
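/*
 * XINTR(legacy,3), for instance, pastes to Xintr_legacy3 here (ELF) and to
 * _Xintr_legacy3 on a.out, where C symbols carry a leading underscore.
 */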

/*
 * Store address of TSS in %eax, given a selector in %eax.
 * Clobbers %eax, %ecx, %edx, but that's ok for its usage.
 * This is a bit complicated, but it's done to make as few
 * assumptions as possible about the validity of the environment.
 * The GDT and the current and previous TSS are known to be OK,
 * otherwise we would not be here. The only other thing that needs
 * to be OK is the cpu_info structure for the current CPU.
 */
#define GET_TSS \
	andl	$0xfff8,%eax				;\
	addl	CPUVAR(GDT),%eax			;\
	movl	2(%eax),%edx				;\
	andl	$0xffffff,%edx				;\
	movzbl	7(%eax),%eax				;\
	shl	$24,%eax				;\
	orl	%edx,%eax
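/*
 * In C, GET_TSS amounts to the following (an illustrative sketch only; the
 * helper and its types are hypothetical): the 32-bit TSS base address is
 * scattered over bytes 2-4 and 7 of the 8-byte GDT descriptor.
 *
 *	uint32_t
 *	tss_base(const uint8_t *gdt, uint16_t sel)
 *	{
 *		const uint8_t *d = gdt + (sel & 0xfff8);
 *		uint32_t base = d[2] | (d[3] << 8) | (d[4] << 16);
 *		return base | ((uint32_t)d[7] << 24);
 *	}
 */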

#if NLAPIC > 0
#ifdef MULTIPROCESSOR
IDTVEC(recurse_lapic_ipi)
	pushfl
	pushl	%cs
	pushl	%esi
	pushl	$0
	pushl	$T_ASTFLT
	INTRENTRY
IDTVEC(resume_lapic_ipi)
	cli
	jmp	1f
IDTVEC(intr_lapic_ipi)
	pushl	$0
	pushl	$T_ASTFLT
	INTRENTRY
	movl	$0,_C_LABEL(local_apic)+LAPIC_EOI
	movl	CPUVAR(ILEVEL),%ebx
	cmpl	$IPL_IPI,%ebx
	jae	2f
1:
	incl	CPUVAR(IDEPTH)
	movl	$IPL_IPI,CPUVAR(ILEVEL)
	sti
	pushl	%ebx
	call	_C_LABEL(i386_ipi_handler)
	jmp	_C_LABEL(Xdoreti)
2:
	orl	$(1 << LIR_IPI),CPUVAR(IPENDING)
	sti
	INTRFASTEXIT

#if defined(DDB)
/*
 * DDB IPI, entered through a task gate so it runs on its own TSS.
 * Find the interrupted task's TSS via the backlink and hand it to
 * ddb_ipi_tss() so DDB can examine this CPU's saved state.
 */
IDTVEC(intrddbipi)
1:
	str	%ax				# selector of our (DDB IPI) TSS
	GET_TSS
	movzwl	(%eax),%eax			# previous task link of that TSS
	GET_TSS
	pushl	%eax				# arg: interrupted task's TSS
	movl	$0xff,_C_LABEL(lapic_tpr)	# block further interrupts via TPR
	movl	$0,_C_LABEL(local_apic)+LAPIC_EOI
	sti
	call	_C_LABEL(ddb_ipi_tss)
	addl	$4,%esp
	movl	$0,_C_LABEL(lapic_tpr)
	iret					# task-switch back via the backlink
	jmp	1b				# re-entered: do it again
#endif /* DDB */
#endif /* MULTIPROCESSOR */

	/*
	 * Interrupt from the local APIC timer.
	 */
IDTVEC(recurse_lapic_ltimer)
	pushfl
	pushl	%cs
	pushl	%esi
	pushl	$0
	pushl	$T_ASTFLT
	INTRENTRY
IDTVEC(resume_lapic_ltimer)
	cli
	jmp	1f
IDTVEC(intr_lapic_ltimer)
	pushl	$0
	pushl	$T_ASTFLT
	INTRENTRY
	movl	$0,_C_LABEL(local_apic)+LAPIC_EOI
	movl	CPUVAR(ILEVEL),%ebx
	cmpl	$IPL_CLOCK,%ebx
	jae	2f
1:
	incl	CPUVAR(IDEPTH)
	movl	$IPL_CLOCK,CPUVAR(ILEVEL)
	sti
	pushl	%ebx
	pushl	$0
	call	_C_LABEL(lapic_clockintr)
	addl	$4,%esp
	jmp	_C_LABEL(Xdoreti)
2:
	orl	$(1 << LIR_TIMER),CPUVAR(IPENDING)
	sti
	INTRFASTEXIT
#endif /* NLAPIC > 0 */

#ifdef MULTIPROCESSOR
#define LOCK_KERNEL	call _C_LABEL(i386_intlock)
#define UNLOCK_KERNEL	call _C_LABEL(i386_intunlock)
#else
#define LOCK_KERNEL
#define UNLOCK_KERNEL
#endif

#define voidop(num)


/*
 * This macro defines the generic stub code. Its arguments modify it
 * for specific PICs.
 */

#define	INTRSTUB(name, num, early_ack, late_ack, mask, unmask, level_check) \
IDTVEC(recurse_/**/name/**/num)						;\
	pushfl								;\
	pushl	%cs							;\
	pushl	%esi							;\
	pushl	$0			/* dummy error code */		;\
	pushl	$T_ASTFLT		/* trap # for doing ASTs */	;\
	INTRENTRY							;\
IDTVEC(resume_/**/name/**/num)						\
	movl	%ebx,%esi						;\
	movl	CPUVAR(ISOURCES) + (num) * 4, %ebp			;\
	movl	IS_MAXLEVEL(%ebp),%ebx					;\
	jmp	1f							;\
IDTVEC(intr_/**/name/**/num)						;\
	pushl	$0			/* dummy error code */		;\
	pushl	$T_ASTFLT		/* trap # for doing ASTs */	;\
	INTRENTRY							;\
	movl	CPUVAR(ISOURCES) + (num) * 4, %ebp			;\
	mask(num)			/* mask it in hardware */	;\
	early_ack(num)			/* and allow other intrs */	;\
	testl	%ebp,%ebp						;\
	jz	9f			/* stray */			;\
	movl	IS_MAXLEVEL(%ebp),%ebx					;\
	movl	CPUVAR(ILEVEL),%esi					;\
	cmpl	%ebx,%esi						;\
	jae	10f			/* currently masked; hold it */	;\
	incl	MY_COUNT+V_INTR		/* statistical info */		;\
	addl	$1,IS_EVCNTLO(%ebp)	/* inc event counter */		;\
	adcl	$0,IS_EVCNTHI(%ebp)					;\
1:									\
	pushl	%esi							;\
	movl	%ebx,CPUVAR(ILEVEL)					;\
	sti								;\
	incl	CPUVAR(IDEPTH)						;\
	movl	IS_HANDLERS(%ebp),%ebx					;\
	LOCK_KERNEL							;\
6:									\
	movl	IH_LEVEL(%ebx),%edi					;\
	cmpl	%esi,%edi						;\
	jle	7f							;\
	pushl	IH_ARG(%ebx)						;\
	movl	%edi,CPUVAR(ILEVEL)					;\
	call	*IH_FUN(%ebx)		/* call it */			;\
	addl	$4,%esp			/* toss the arg */		;\
	level_check(num)						;\
	movl	IH_NEXT(%ebx),%ebx	/* next handler in chain */	;\
	testl	%ebx,%ebx						;\
	jnz	6b							;\
5:									\
	UNLOCK_KERNEL							;\
	unmask(num)			/* unmask it in hardware */	;\
	late_ack(num)							;\
	sti								;\
	jmp	_C_LABEL(Xdoreti)	/* lower spl and do ASTs */	;\
7:									\
	UNLOCK_KERNEL							;\
	orl	$(1 << num),CPUVAR(IPENDING)				;\
	late_ack(num)							;\
	sti								;\
	jmp	_C_LABEL(Xdoreti)	/* lower spl and do ASTs */	;\
10:									\
	orl	$(1 << num),CPUVAR(IPENDING)				;\
	late_ack(num)							;\
	sti								;\
	INTRFASTEXIT							;\
9:									\
	unmask(num)							;\
	late_ack(num)							;\
	sti								;\
	INTRFASTEXIT
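
/*
 * In rough C, the body shared by all of these stubs looks like the
 * following (an illustrative sketch; the member names mirror the assym.h
 * offsets used above, everything else is hypothetical):
 *
 *	struct intrsource *is = ci->ci_isources[num];
 *	int osil = ci->ci_ilevel;
 *
 *	if (osil >= is->is_maxlevel) {
 *		ci->ci_ipending |= 1 << num;	   hold it for Xspllower
 *		return;				   (INTRFASTEXIT)
 *	}
 *	ci->ci_ilevel = is->is_maxlevel;
 *	for (ih = is->is_handlers; ih != NULL; ih = ih->ih_next) {
 *		if (ih->ih_level <= osil) {	   rest are masked; re-pend
 *			ci->ci_ipending |= 1 << num;
 *			break;
 *		}
 *		ci->ci_ilevel = ih->ih_level;
 *		(*ih->ih_fun)(ih->ih_arg);
 *	}
 *	Xdoreti();				   lower spl, run ASTs
 */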

#define ICUADDR IO_ICU1

INTRSTUB(legacy,0,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,1,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,2,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,3,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,4,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,5,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,6,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,7,i8259_asm_ack1,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
#undef ICUADDR
#define ICUADDR IO_ICU2

INTRSTUB(legacy,8,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,9,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,10,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,11,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,12,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,13,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,14,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)
INTRSTUB(legacy,15,i8259_asm_ack2,voidop,i8259_asm_mask,i8259_asm_unmask,
    voidop)

#if NIOAPIC > 0

INTRSTUB(ioapic,0,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,1,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,2,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,3,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,4,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,5,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,6,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,7,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,8,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,9,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,10,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,11,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,12,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,13,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,14,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,15,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,16,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,17,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,18,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,19,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,20,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,21,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,22,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,23,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,24,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,25,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,26,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,27,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,28,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,29,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,30,voidop,ioapic_asm_ack,voidop,voidop,voidop)
INTRSTUB(ioapic,31,voidop,ioapic_asm_ack,voidop,voidop,voidop)

#endif

.globl _C_LABEL(i8259_stubs)
_C_LABEL(i8259_stubs):
	.long _C_LABEL(Xintr_legacy0), _C_LABEL(Xrecurse_legacy0)
	.long _C_LABEL(Xresume_legacy0)
	.long _C_LABEL(Xintr_legacy1), _C_LABEL(Xrecurse_legacy1)
	.long _C_LABEL(Xresume_legacy1)
	.long _C_LABEL(Xintr_legacy2), _C_LABEL(Xrecurse_legacy2)
	.long _C_LABEL(Xresume_legacy2)
	.long _C_LABEL(Xintr_legacy3), _C_LABEL(Xrecurse_legacy3)
	.long _C_LABEL(Xresume_legacy3)
	.long _C_LABEL(Xintr_legacy4), _C_LABEL(Xrecurse_legacy4)
	.long _C_LABEL(Xresume_legacy4)
	.long _C_LABEL(Xintr_legacy5), _C_LABEL(Xrecurse_legacy5)
	.long _C_LABEL(Xresume_legacy5)
	.long _C_LABEL(Xintr_legacy6), _C_LABEL(Xrecurse_legacy6)
	.long _C_LABEL(Xresume_legacy6)
	.long _C_LABEL(Xintr_legacy7), _C_LABEL(Xrecurse_legacy7)
	.long _C_LABEL(Xresume_legacy7)
	.long _C_LABEL(Xintr_legacy8), _C_LABEL(Xrecurse_legacy8)
	.long _C_LABEL(Xresume_legacy8)
	.long _C_LABEL(Xintr_legacy9), _C_LABEL(Xrecurse_legacy9)
	.long _C_LABEL(Xresume_legacy9)
	.long _C_LABEL(Xintr_legacy10), _C_LABEL(Xrecurse_legacy10)
	.long _C_LABEL(Xresume_legacy10)
	.long _C_LABEL(Xintr_legacy11), _C_LABEL(Xrecurse_legacy11)
	.long _C_LABEL(Xresume_legacy11)
	.long _C_LABEL(Xintr_legacy12), _C_LABEL(Xrecurse_legacy12)
	.long _C_LABEL(Xresume_legacy12)
	.long _C_LABEL(Xintr_legacy13), _C_LABEL(Xrecurse_legacy13)
	.long _C_LABEL(Xresume_legacy13)
	.long _C_LABEL(Xintr_legacy14), _C_LABEL(Xrecurse_legacy14)
	.long _C_LABEL(Xresume_legacy14)
	.long _C_LABEL(Xintr_legacy15), _C_LABEL(Xrecurse_legacy15)
	.long _C_LABEL(Xresume_legacy15)

#if NIOAPIC > 0
.globl _C_LABEL(ioapic_stubs)
_C_LABEL(ioapic_stubs):
	.long _C_LABEL(Xintr_ioapic0), _C_LABEL(Xrecurse_ioapic0)
	.long _C_LABEL(Xresume_ioapic0)
	.long _C_LABEL(Xintr_ioapic1), _C_LABEL(Xrecurse_ioapic1)
	.long _C_LABEL(Xresume_ioapic1)
	.long _C_LABEL(Xintr_ioapic2), _C_LABEL(Xrecurse_ioapic2)
	.long _C_LABEL(Xresume_ioapic2)
	.long _C_LABEL(Xintr_ioapic3), _C_LABEL(Xrecurse_ioapic3)
	.long _C_LABEL(Xresume_ioapic3)
	.long _C_LABEL(Xintr_ioapic4), _C_LABEL(Xrecurse_ioapic4)
	.long _C_LABEL(Xresume_ioapic4)
	.long _C_LABEL(Xintr_ioapic5), _C_LABEL(Xrecurse_ioapic5)
	.long _C_LABEL(Xresume_ioapic5)
	.long _C_LABEL(Xintr_ioapic6), _C_LABEL(Xrecurse_ioapic6)
	.long _C_LABEL(Xresume_ioapic6)
	.long _C_LABEL(Xintr_ioapic7), _C_LABEL(Xrecurse_ioapic7)
	.long _C_LABEL(Xresume_ioapic7)
	.long _C_LABEL(Xintr_ioapic8), _C_LABEL(Xrecurse_ioapic8)
	.long _C_LABEL(Xresume_ioapic8)
	.long _C_LABEL(Xintr_ioapic9), _C_LABEL(Xrecurse_ioapic9)
	.long _C_LABEL(Xresume_ioapic9)
	.long _C_LABEL(Xintr_ioapic10), _C_LABEL(Xrecurse_ioapic10)
	.long _C_LABEL(Xresume_ioapic10)
	.long _C_LABEL(Xintr_ioapic11), _C_LABEL(Xrecurse_ioapic11)
	.long _C_LABEL(Xresume_ioapic11)
	.long _C_LABEL(Xintr_ioapic12), _C_LABEL(Xrecurse_ioapic12)
	.long _C_LABEL(Xresume_ioapic12)
	.long _C_LABEL(Xintr_ioapic13), _C_LABEL(Xrecurse_ioapic13)
	.long _C_LABEL(Xresume_ioapic13)
	.long _C_LABEL(Xintr_ioapic14), _C_LABEL(Xrecurse_ioapic14)
	.long _C_LABEL(Xresume_ioapic14)
	.long _C_LABEL(Xintr_ioapic15), _C_LABEL(Xrecurse_ioapic15)
	.long _C_LABEL(Xresume_ioapic15)
	.long _C_LABEL(Xintr_ioapic16), _C_LABEL(Xrecurse_ioapic16)
	.long _C_LABEL(Xresume_ioapic16)
	.long _C_LABEL(Xintr_ioapic17), _C_LABEL(Xrecurse_ioapic17)
	.long _C_LABEL(Xresume_ioapic17)
	.long _C_LABEL(Xintr_ioapic18), _C_LABEL(Xrecurse_ioapic18)
	.long _C_LABEL(Xresume_ioapic18)
	.long _C_LABEL(Xintr_ioapic19), _C_LABEL(Xrecurse_ioapic19)
	.long _C_LABEL(Xresume_ioapic19)
	.long _C_LABEL(Xintr_ioapic20), _C_LABEL(Xrecurse_ioapic20)
	.long _C_LABEL(Xresume_ioapic20)
	.long _C_LABEL(Xintr_ioapic21), _C_LABEL(Xrecurse_ioapic21)
	.long _C_LABEL(Xresume_ioapic21)
	.long _C_LABEL(Xintr_ioapic22), _C_LABEL(Xrecurse_ioapic22)
	.long _C_LABEL(Xresume_ioapic22)
	.long _C_LABEL(Xintr_ioapic23), _C_LABEL(Xrecurse_ioapic23)
	.long _C_LABEL(Xresume_ioapic23)
	.long _C_LABEL(Xintr_ioapic24), _C_LABEL(Xrecurse_ioapic24)
	.long _C_LABEL(Xresume_ioapic24)
	.long _C_LABEL(Xintr_ioapic25), _C_LABEL(Xrecurse_ioapic25)
	.long _C_LABEL(Xresume_ioapic25)
	.long _C_LABEL(Xintr_ioapic26), _C_LABEL(Xrecurse_ioapic26)
	.long _C_LABEL(Xresume_ioapic26)
	.long _C_LABEL(Xintr_ioapic27), _C_LABEL(Xrecurse_ioapic27)
	.long _C_LABEL(Xresume_ioapic27)
	.long _C_LABEL(Xintr_ioapic28), _C_LABEL(Xrecurse_ioapic28)
	.long _C_LABEL(Xresume_ioapic28)
	.long _C_LABEL(Xintr_ioapic29), _C_LABEL(Xrecurse_ioapic29)
	.long _C_LABEL(Xresume_ioapic29)
	.long _C_LABEL(Xintr_ioapic30), _C_LABEL(Xrecurse_ioapic30)
	.long _C_LABEL(Xresume_ioapic30)
	.long _C_LABEL(Xintr_ioapic31), _C_LABEL(Xrecurse_ioapic31)
	.long _C_LABEL(Xresume_ioapic31)
#endif

/*
 * Symbols that vmstat -i wants, even though they're not used.
 */
.globl	_C_LABEL(intrnames)
_C_LABEL(intrnames):
.globl	_C_LABEL(eintrnames)
_C_LABEL(eintrnames):

.globl	_C_LABEL(intrcnt)
_C_LABEL(intrcnt):
.globl	_C_LABEL(eintrcnt)
_C_LABEL(eintrcnt):

/*
 * Soft interrupt handlers
 */
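/*
 * These handlers are not entered through the IDT but from the spl-lowering
 * path (Xdoreti/Xspllower in spl.S), which by the convention visible in the
 * recurse stubs above keeps the previous interrupt level in %ebx and the
 * return address in %esi; hence each handler ends by restoring ILEVEL from
 * %ebx and jumping back through %esi.
 */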

IDTVEC(softserial)
	movl	$IPL_SOFTSERIAL, CPUVAR(ILEVEL)
	incl	CPUVAR(IDEPTH)
#ifdef MULTIPROCESSOR
	call	_C_LABEL(i386_softintlock)
#endif
	movl	CPUVAR(ISOURCES) + SIR_SERIAL * 4, %edi
	addl	$1,IS_EVCNTLO(%edi)
	adcl	$0,IS_EVCNTHI(%edi)
	pushl	$I386_SOFTINTR_SOFTSERIAL
	call	_C_LABEL(softintr_dispatch)
	addl	$4,%esp
#ifdef MULTIPROCESSOR
	call	_C_LABEL(i386_softintunlock)
#endif
	decl	CPUVAR(IDEPTH)
	movl	%ebx,CPUVAR(ILEVEL)
	jmp	*%esi

IDTVEC(softnet)
	movl	$IPL_SOFTNET, CPUVAR(ILEVEL)
	incl	CPUVAR(IDEPTH)
#ifdef MULTIPROCESSOR
	call	_C_LABEL(i386_softintlock)
#endif
	movl	CPUVAR(ISOURCES) + SIR_NET * 4, %edi
	addl	$1,IS_EVCNTLO(%edi)
	adcl	$0,IS_EVCNTHI(%edi)

	xorl	%edi,%edi
	xchgl	_C_LABEL(netisr),%edi

	/* XXX Do the legacy netisrs here for now. */
#define DONETISR(s, c) \
	.globl  _C_LABEL(c)	;\
	testl	$(1 << s),%edi	;\
	jz	1f		;\
	call	_C_LABEL(c)	;\
1:
#include <net/netisr_dispatch.h>
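	/*
	 * Each DONETISR(s, c) line pulled in from <net/netisr_dispatch.h>
	 * expands to roughly
	 *
	 *	testl	$(1 << NETISR_IP),%edi
	 *	jz	1f
	 *	call	_C_LABEL(ipintr)
	 *	1:
	 *
	 * so every netisr whose bit was set in the snapshot taken above has
	 * its handler called once (NETISR_IP/ipintr being one illustrative
	 * pair from that header).
	 */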

	pushl	$I386_SOFTINTR_SOFTNET
	call	_C_LABEL(softintr_dispatch)
	addl	$4,%esp
#ifdef MULTIPROCESSOR
	call	_C_LABEL(i386_softintunlock)
#endif
	decl	CPUVAR(IDEPTH)
	movl	%ebx,CPUVAR(ILEVEL)
	jmp	*%esi

IDTVEC(softclock)
	movl	$IPL_SOFTCLOCK, CPUVAR(ILEVEL)
	incl	CPUVAR(IDEPTH)
#ifdef MULTIPROCESSOR
	call	_C_LABEL(i386_softintlock)
#endif
	movl	CPUVAR(ISOURCES) + SIR_CLOCK * 4, %edi
	addl	$1,IS_EVCNTLO(%edi)
	adcl	$0,IS_EVCNTHI(%edi)

	pushl	$I386_SOFTINTR_SOFTCLOCK
	call	_C_LABEL(softintr_dispatch)
	addl	$4,%esp
#ifdef MULTIPROCESSOR
	call	_C_LABEL(i386_softintunlock)
#endif
	decl	CPUVAR(IDEPTH)
	movl	%ebx,CPUVAR(ILEVEL)
	jmp	*%esi

/*
 * Trap and fault vector routines
 *
 * On exit from the kernel to user mode, we always need to check for ASTs.  In
 * addition, we need to do this atomically; otherwise an interrupt may occur
 * which causes an AST, but it won't get processed until the next kernel entry
 * (possibly the next clock tick).  Thus, we disable interrupts before checking,
 * and only enable them again on the final `iret' or before calling the AST
 * handler.
 */
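/*
 * In rough C, the exit path implemented in alltraps below (an illustrative
 * sketch only, not kernel code):
 *
 *	for (;;) {
 *		disable_intr();
 *		if (!astpending || returning_to_kernel(frame))
 *			break;			   leave with interrupts off
 *		astpending = 0;
 *		enable_intr();
 *		frame->tf_trapno = T_ASTFLT;
 *		trap(frame);			   may post further ASTs
 *	}
 *	INTRFASTEXIT;
 */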

#define TRAP(a)			pushl $(a) ; jmp _C_LABEL(alltraps)
#define ZTRAP(a)		pushl $0 ; TRAP(a)

#ifdef IPKDB
#define BPTTRAP(a)	pushl $0; pushl $(a); jmp _C_LABEL(bpttraps)
#else
#define BPTTRAP(a)	ZTRAP(a)
#endif
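/*
 * For example, "IDTVEC(trap00) ZTRAP(T_DIVIDE)" below assembles to roughly
 *
 *	Xtrap00:
 *		pushl	$0		fake error code
 *		pushl	$(T_DIVIDE)
 *		jmp	_C_LABEL(alltraps)
 *
 * so that every exception reaches alltraps with a uniform frame, whether or
 * not the CPU pushed an error code of its own.
 */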


	.text
IDTVEC(trap00)
	ZTRAP(T_DIVIDE)
IDTVEC(trap01)
	BPTTRAP(T_TRCTRAP)
IDTVEC(trap02)
	ZTRAP(T_NMI)
IDTVEC(trap03)
	BPTTRAP(T_BPTFLT)
IDTVEC(trap04)
	ZTRAP(T_OFLOW)
IDTVEC(trap05)
	ZTRAP(T_BOUND)
IDTVEC(trap06)
	ZTRAP(T_PRIVINFLT)
IDTVEC(trap07)
#if NNPX > 0
	pushl	$0			# dummy error code
	pushl	$T_DNA
	INTRENTRY
	pushl	CPUVAR(SELF)
	call	*_C_LABEL(npxdna_func)
	addl	$4,%esp
	testl	%eax,%eax
	jz	calltrap
	INTRFASTEXIT
#else
	ZTRAP(T_DNA)
#endif
IDTVEC(trap08)
	ZTRAP(T_DOUBLEFLT)
IDTVEC(trap09)
	ZTRAP(T_FPOPFLT)
IDTVEC(trap0a)
	TRAP(T_TSSFLT)
IDTVEC(trap0b)
	TRAP(T_SEGNPFLT)
IDTVEC(trap0c)
	TRAP(T_STKFLT)
IDTVEC(trap0d)
	TRAP(T_PROTFLT)
IDTVEC(trap0e)
#ifndef I586_CPU
	TRAP(T_PAGEFLT)
#else
	/*
	 * Pentium F00F bug workaround: the IDT is aliased read-only at
	 * pentium_idt, so the erratum shows up as a kernel-mode page
	 * fault on IDT entry 6 (#UD).  Turn that into a privileged
	 * instruction fault; everything else goes to trap() as usual.
	 */
	pushl	$T_PAGEFLT
	INTRENTRY
	testb	$PGEX_U,TF_ERR(%esp)	# fault from user mode?
	jnz	calltrap
	movl	%cr2,%eax		# faulting address
	subl	_C_LABEL(pentium_idt),%eax
	cmpl	$(6*8),%eax		# exactly at IDT entry 6?
	jne	calltrap
	movb	$T_PRIVINFLT,TF_TRAPNO(%esp)
	jmp	calltrap
#endif

IDTVEC(intrspurious)
IDTVEC(trap0f)
	/*
	 * The Pentium Pro local APIC may erroneously call this vector for a
	 * default IR7.  Just ignore it.
	 *
	 * (The local APIC does this when CPL is raised while it's on the
	 * way to delivering an interrupt.. presumably enough has been set
	 * up that it's inconvenient to abort delivery completely..)
	 */
	iret

IDTVEC(trap10)
#if NNPX > 0
	/*
	 * Handle like an interrupt so that we can call npxintr to clear the
	 * error.  It would be better to handle npx interrupts as traps but
	 * this is difficult for nested interrupts.
	 */
	pushl	$0			# dummy error code
	pushl	$T_ASTFLT
	INTRENTRY
	pushl	CPUVAR(ILEVEL)
	pushl	%esp
	incl	_C_LABEL(uvmexp)+V_TRAP
	call	_C_LABEL(npxintr)
	addl	$8,%esp
	INTRFASTEXIT
#else
	ZTRAP(T_ARITHTRAP)
#endif
IDTVEC(trap11)
	ZTRAP(T_ALIGNFLT)
IDTVEC(trap12)
IDTVEC(trap13)
IDTVEC(trap14)
IDTVEC(trap15)
IDTVEC(trap16)
IDTVEC(trap17)
IDTVEC(trap18)
IDTVEC(trap19)
IDTVEC(trap1a)
IDTVEC(trap1b)
IDTVEC(trap1c)
IDTVEC(trap1d)
IDTVEC(trap1e)
IDTVEC(trap1f)
	/* 18 - 31 reserved for future exp */
	ZTRAP(T_RESERVED)

IDTVEC(exceptions)
	.long	_C_LABEL(Xtrap00), _C_LABEL(Xtrap01)
	.long	_C_LABEL(Xtrap02), _C_LABEL(Xtrap03)
	.long	_C_LABEL(Xtrap04), _C_LABEL(Xtrap05)
	.long	_C_LABEL(Xtrap06), _C_LABEL(Xtrap07)
	.long	_C_LABEL(Xtrap08), _C_LABEL(Xtrap09)
	.long	_C_LABEL(Xtrap0a), _C_LABEL(Xtrap0b)
	.long	_C_LABEL(Xtrap0c), _C_LABEL(Xtrap0d)
	.long	_C_LABEL(Xtrap0e), _C_LABEL(Xtrap0f)
	.long	_C_LABEL(Xtrap10), _C_LABEL(Xtrap11)
	.long	_C_LABEL(Xtrap12), _C_LABEL(Xtrap13)
	.long	_C_LABEL(Xtrap14), _C_LABEL(Xtrap15)
	.long	_C_LABEL(Xtrap16), _C_LABEL(Xtrap17)
	.long	_C_LABEL(Xtrap18), _C_LABEL(Xtrap19)
	.long	_C_LABEL(Xtrap1a), _C_LABEL(Xtrap1b)
	.long	_C_LABEL(Xtrap1c), _C_LABEL(Xtrap1d)
	.long	_C_LABEL(Xtrap1e), _C_LABEL(Xtrap1f)


/*
 * Double fault handler, entered through a task gate so it runs on a known
 * good stack.  Recover the faulting task's TSS through the backlink and
 * pass it to trap_tss() together with the trap number; the error code the
 * CPU pushed is popped along with the arguments afterwards.
 */
IDTVEC(tss_trap08)
1:
	str	%ax				# selector of the double fault TSS
	GET_TSS
	movzwl	(%eax),%eax			# previous task link of that TSS
	GET_TSS
	pushl	$T_DOUBLEFLT
	pushl	%eax
	call	_C_LABEL(trap_tss)
	addl	$12,%esp			# args plus the error code
	iret					# task-switch back via the backlink
	jmp	1b


/* LINTSTUB: Ignore */
NENTRY(alltraps)
	INTRENTRY
calltrap:
#ifdef DIAGNOSTIC
	movl	CPUVAR(ILEVEL),%ebx
#endif /* DIAGNOSTIC */
	call	_C_LABEL(trap)
2:	/* Check for ASTs on exit to user mode. */
	cli
	CHECK_ASTPENDING()
	je	1f
	testb	$SEL_RPL,TF_CS(%esp)
#ifdef VM86
	jnz	5f
	testl	$PSL_VM,TF_EFLAGS(%esp)
#endif
	jz	1f
5:	CLEAR_ASTPENDING()
	sti
	movl	$T_ASTFLT,TF_TRAPNO(%esp)
	call	_C_LABEL(trap)
	jmp	2b
#ifndef DIAGNOSTIC
1:	INTRFASTEXIT
#else
1:	cmpl	CPUVAR(ILEVEL),%ebx
	jne	3f
	INTRFASTEXIT
3:	sti
	pushl	$4f
	call	_C_LABEL(printf)
	addl	$4,%esp
#ifdef DDB
	int	$3
#endif /* DDB */
	movl	%ebx,CPUVAR(ILEVEL)
	jmp	2b
4:	.asciz	"WARNING: SPL NOT LOWERED ON TRAP EXIT\n"
#endif /* DIAGNOSTIC */

#ifdef IPKDB
/* LINTSTUB: Ignore */
NENTRY(bpttraps)
	INTRENTRY
	call	_C_LABEL(ipkdb_trap_glue)
	testl	%eax,%eax
	jz	calltrap
	INTRFASTEXIT

ipkdbsetup:
	popl	%ecx

	/* Disable write protection: */
	movl	%cr0,%eax
	pushl	%eax
	andl	$~CR0_WP,%eax
	movl	%eax,%cr0

	/* Substitute Protection & Page Fault handlers: */
	movl	_C_LABEL(idt),%edx
	pushl	13*8(%edx)
	pushl	13*8+4(%edx)
	pushl	14*8(%edx)
	pushl	14*8+4(%edx)
	movl	$fault,%eax
	movw	%ax,13*8(%edx)
	movw	%ax,14*8(%edx)
	shrl	$16,%eax
	movw	%ax,13*8+6(%edx)
	movw	%ax,14*8+6(%edx)

	pushl	%ecx
	ret

ipkdbrestore:
	popl	%ecx

	/* Restore Protection & Page Fault handlers: */
	movl	_C_LABEL(idt),%edx
	popl	14*8+4(%edx)
	popl	14*8(%edx)
	popl	13*8+4(%edx)
	popl	13*8(%edx)

	/* Restore write protection: */
	popl	%edx
	movl	%edx,%cr0

	pushl	%ecx
	ret
#endif /* IPKDB */


/*
 * If an error is detected during trap, syscall, or interrupt exit, trap() will
 * change %eip to point to one of these labels.  We clean up the stack, if
 * necessary, and resume as if we were handling a general protection fault.
 * This will cause the process to get a SIGBUS.
 */
/* LINTSTUB: Var: char resume_iret[1]; */
NENTRY(resume_iret)
	ZTRAP(T_PROTFLT)
/* LINTSTUB: Var: char resume_pop_ds[1]; */
NENTRY(resume_pop_ds)
	pushl	%es
	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax
	movw	%ax,%es
/* LINTSTUB: Var: char resume_pop_es[1]; */
NENTRY(resume_pop_es)
	pushl	%fs
	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax
	movw	%ax,%fs
/* LINTSTUB: Var: char resume_pop_fs[1]; */
NENTRY(resume_pop_fs)
	pushl	%gs
	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax
	movw	%ax,%gs
/* LINTSTUB: Var: char resume_pop_gs[1]; */
NENTRY(resume_pop_gs)
	movl	$T_PROTFLT,TF_TRAPNO(%esp)
	jmp	calltrap

#ifdef IPKDB
/* LINTSTUB: Func: int ipkdbfbyte(u_char *c) */
NENTRY(ipkdbfbyte)
	pushl	%ebp
	movl	%esp,%ebp
	call	ipkdbsetup
	movl	8(%ebp),%edx
	movzbl	(%edx),%eax
faultexit:
	call	ipkdbrestore
	popl	%ebp
	ret

/* LINTSTUB: Func: int ipkdbsbyte(u_char *c, int i) */
NENTRY(ipkdbsbyte)
	pushl	%ebp
	movl	%esp,%ebp
	call	ipkdbsetup
	movl	8(%ebp),%edx
	movl	12(%ebp),%eax
	movb	%al,(%edx)
	call	ipkdbrestore
	popl	%ebp
	ret

fault:
	popl	%eax		/* error code */
	movl	$faultexit,%eax
	movl	%eax,(%esp)
	movl	$-1,%eax
	iret
#endif	/* IPKDB */