xref: /dragonfly/sys/platform/pc64/apic/apic_vector.s (revision 927da715)
1/*
2 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in
12 *    the documentation and/or other materials provided with the
13 *    distribution.
14 * 3. Neither the name of The DragonFly Project nor the names of its
15 *    contributors may be used to endorse or promote products derived
16 *    from this software without specific, prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
26 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
28 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * from: vector.s, 386BSD 0.1 unknown origin
32 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
33 * $DragonFly: src/sys/platform/pc64/apic/apic_vector.s,v 1.1 2008/08/29 17:07:12 dillon Exp $
34 */
35
36#include "use_npx.h"
37#include "opt_auto_eoi.h"
38
39#include <machine/asmacros.h>
40#include <machine/lock.h>
41#include <machine/psl.h>
42#include <machine/trap.h>
43
44#include <machine_base/icu/icu.h>
45#include <bus/isa/i386/isa.h>
46
47#include "assym.s"
48
49#include "apicreg.h"
50#include "apic_ipl.h"
51#include <machine/smp.h>
52#include <machine_base/isa/intr_machdep.h>
53
54/* convert an absolute IRQ# into a bitmask */
55#define IRQ_LBIT(irq_num)	(1 << (irq_num))
56
57/* make an index into the IO APIC from the IRQ# */
58#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
59
/* bus-lock prefix for atomic memory ops; expands to nothing on UP kernels */
60#ifdef SMP
61#define MPLOCKED     lock ;
62#else
63#define MPLOCKED
64#endif
65
66/*
67 * Push an interrupt frame in a format acceptable to doreti, reload
68 * the segment registers for the kernel.
69 */
70#define PUSH_FRAME							\
71	pushl	$0 ;		/* dummy error code */			\
72	pushl	$0 ;		/* dummy trap type */			\
73	pushl	$0 ;		/* dummy xflags type */			\
74	pushal ;							\
75	pushl	%ds ;		/* save data and extra segments ... */	\
76	pushl	%es ;							\
77	pushl	%fs ;							\
78	pushl	%gs ;							\
79	cld ;								\
80	mov	$KDSEL,%ax ;						\
81	mov	%ax,%ds ;						\
82	mov	%ax,%es ;						\
83	mov	%ax,%gs ;						\
84	mov	$KPSEL,%ax ;						\
85	mov	%ax,%fs ;						\
86
/*
 * Fake an interrupt frame using the current flags/cs and the caller's
 * return eip (at 12(%esp)).  The pushal/segment/CPL area is only
 * reserved, not initialized; it is discarded wholesale by POP_DUMMY.
 */
87#define PUSH_DUMMY							\
88	pushfl ;		/* phys int frame / flags */		\
89	pushl %cs ;		/* phys int frame / cs */		\
90	pushl	12(%esp) ;	/* original caller eip */		\
91	pushl	$0 ;		/* dummy error code */			\
92	pushl	$0 ;		/* dummy trap type */			\
93	pushl	$0 ;		/* dummy xflags type */			\
94	subl	$13*4,%esp ;	/* pushal + 4 seg regs (dummy) + CPL */	\
95
96/*
97 * Warning: POP_FRAME can only be used if there is no chance of a
98 * segment register being changed (e.g. by procfs), which is why syscalls
99 * have to use doreti.
100 */
101#define POP_FRAME							\
102	popl	%gs ;							\
103	popl	%fs ;							\
104	popl	%es ;							\
105	popl	%ds ;							\
106	popal ;								\
107	addl	$3*4,%esp ;	/* dummy xflags, trap & error codes */	\
108
/* discard the 19-dword frame built by PUSH_DUMMY */
109#define POP_DUMMY							\
110	addl	$19*4,%esp ;						\
111
/*
 * Per-IRQ IO APIC register base and redirection-table index, read from
 * the int_to_apicintpin[] array (16-byte entries; fields at offsets
 * 8 and 12 — presumably the apic address and redirect index; see
 * intr_machdep.h for the struct layout).
 */
112#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
113#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
114
/*
 * Mask the given IRQ in its IO APIC redirection entry, unless the
 * apic_imen shadow mask shows it is already masked.
 * Clobbers %eax, %ecx and the flags.
 */
115#define MASK_IRQ(irq_num)						\
116	APIC_IMASK_LOCK ;			/* into critical reg */	\
117	testl	$IRQ_LBIT(irq_num), apic_imen ;				\
118	jne	7f ;			/* masked, don't mask */	\
119	orl	$IRQ_LBIT(irq_num), apic_imen ;	/* set the mask bit */	\
120	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
121	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
122	movl	%eax, (%ecx) ;			/* write the index */	\
123	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
124	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
125	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
1267: ;						/* already masked */	\
127	APIC_IMASK_UNLOCK ;						\
128
129/*
130 * Test to see whether we are handling an edge or level triggered INT.
131 *  Level-triggered INTs must still be masked as we don't clear the source,
132 *  and the EOI cycle would cause redundant INTs to occur.
 *  Clobbers %eax/%ecx and flags (via MASK_IRQ) when the pin is
 *  level-triggered; clobbers only flags otherwise.
133 */
134#define MASK_LEVEL_IRQ(irq_num)						\
135	testl	$IRQ_LBIT(irq_num), apic_pin_trigger ;			\
136	jz	9f ;				/* edge, don't mask */	\
137	MASK_IRQ(irq_num) ;						\
1389: ;									\
139
140/*
141 * Test to see if the source is currently masked, clear if so.
 *
 * Entered with the handler's status in %eax: a non-zero value means the
 * source must stay masked, so the whole unmask sequence is skipped
 * (see the ithread_fast_handler call in FAST_INTR, "returns 0 to unmask").
142 */
143#define UNMASK_IRQ(irq_num)					\
144	cmpl	$0,%eax ;						\
145	jnz	8f ;							\
146	APIC_IMASK_LOCK ;			/* into critical reg */	\
147	testl	$IRQ_LBIT(irq_num), apic_imen ;				\
148	je	7f ;			/* bit clear, not masked */	\
149	andl	$~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */	\
150	movl	IOAPICADDR(irq_num),%ecx ;	/* ioapic addr */	\
151	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
152	movl	%eax,(%ecx) ;			/* write the index */	\
153	movl	IOAPIC_WINDOW(%ecx),%eax ;	/* current value */	\
154	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
155	movl	%eax,IOAPIC_WINDOW(%ecx) ;	/* new value */		\
1567: ;									\
157	APIC_IMASK_UNLOCK ;						\
1588: ;									\
159
160#ifdef APIC_IO
161
162/*
163 * Fast interrupt call handlers run in the following sequence:
164 *
165 *	- Push the trap frame required by doreti
166 *	- Mask the interrupt and reenable its source
167 *	- If we cannot take the interrupt set its fpending bit and
168 *	  doreti.  Note that we cannot mess with mp_lock at all
169 *	  if we entered from a critical section!
170 *	- If we can take the interrupt clear its fpending bit,
171 *	  call the handler, then unmask and doreti.
172 *
173 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
174 */
175
176#define	FAST_INTR(irq_num, vec_name)					\
177	.text ;								\
178	SUPERALIGN_TEXT ;						\
179IDTVEC(vec_name) ;							\
180	PUSH_FRAME ;							\
181	FAKE_MCOUNT(15*4(%esp)) ;					\
182	MASK_LEVEL_IRQ(irq_num) ;					\
183	movl	$0, lapic_eoi ;						\
184	movl	PCPU(curthread),%ebx ;					\
185	movl	$0,%eax ;	/* CURRENT CPL IN FRAME (REMOVED) */	\
186	pushl	%eax ;							\
187	testl	$-1,TD_NEST_COUNT(%ebx) ;				\
188	jne	1f ;							\
189	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) ;				\
190	jl	2f ;		/* not in a critical section */		\
1911: ;									\
192	/* in critical section, make interrupt pending */		\
193	/* set the pending bit and return, leave interrupt masked */	\
194	orl	$IRQ_LBIT(irq_num),PCPU(fpending) ;			\
195	orl	$RQF_INTPEND,PCPU(reqflags) ;				\
196	jmp	5f ;							\
1972: ;									\
198	/* clear pending bit, run handler */				\
199	andl	$~IRQ_LBIT(irq_num),PCPU(fpending) ;			\
200	pushl	$irq_num ;						\
201	pushl	%esp ;			 /* pass frame by reference */	\
202	call	ithread_fast_handler ;	 /* returns 0 to unmask */	\
203	addl	$8, %esp ;						\
204	UNMASK_IRQ(irq_num) ;						\
2055: ;									\
206	MEXITCOUNT ;							\
207	jmp	doreti ;						\
208
209/*
210 * Slow interrupt call handlers run in the following sequence:
211 *
212 *	- Push the trap frame required by doreti.
213 *	- Mask the interrupt and reenable its source.
214 *	- If we cannot take the interrupt set its ipending bit and
215 *	  doreti.  In addition to checking for a critical section
216 *	  and cpl mask we also check to see if the thread is still
217 *	  running.  Note that we cannot mess with mp_lock at all
218 *	  if we entered from a critical section!
219 *	- If we can take the interrupt clear its ipending bit
220 *	  and schedule the thread.  Leave interrupts masked and doreti.
221 *
222 *	Note that calls to sched_ithd() are made with interrupts enabled
223 *	and outside a critical section.  YYY sched_ithd may preempt us
224 *	synchronously (fix interrupt stacking).
225 *
226 *	YYY can cache gd base pointer instead of using hidden %fs
227 *	prefixes.
228 */
229
230#define SLOW_INTR(irq_num, vec_name, maybe_extra_ipending)		\
231	.text ;								\
232	SUPERALIGN_TEXT ;						\
233IDTVEC(vec_name) ;							\
234	PUSH_FRAME ;							\
235	maybe_extra_ipending ;						\
236;									\
237	MASK_LEVEL_IRQ(irq_num) ;					\
238	incl	PCPU(cnt) + V_INTR ;					\
239	movl	$0, lapic_eoi ;						\
240	movl	PCPU(curthread),%ebx ;					\
241	movl	$0,%eax ;	/* CURRENT CPL IN FRAME (REMOVED) */	\
242	pushl	%eax ;		/* cpl to restore */			\
243	testl	$-1,TD_NEST_COUNT(%ebx) ;				\
244	jne	1f ;							\
245	cmpl	$TDPRI_CRIT,TD_PRI(%ebx) ;				\
246	jl	2f ;		/* not in a critical section */		\
2471: ;									\
248	/* set the pending bit and return, leave the interrupt masked */ \
249	orl	$IRQ_LBIT(irq_num), PCPU(ipending) ;			\
250	orl	$RQF_INTPEND,PCPU(reqflags) ;				\
251	jmp	5f ;							\
2522: ;									\
253	/* set running bit, clear pending bit, run handler */		\
254	andl	$~IRQ_LBIT(irq_num), PCPU(ipending) ;			\
255	incl	TD_NEST_COUNT(%ebx) ;					\
256	sti ;								\
257	pushl	$irq_num ;						\
258	call	sched_ithd ;						\
259	addl	$4,%esp ;						\
260	cli ;								\
261	decl	TD_NEST_COUNT(%ebx) ;					\
2625: ;									\
263	MEXITCOUNT ;							\
264	jmp	doreti ;						\
265
266/*
267 * Wrong interrupt call handlers.  We program these into APIC vectors
268 * that should otherwise never occur.  For example, we program the SLOW
269 * vector for irq N with this when we program the FAST vector with the
270 * real interrupt.
271 *
272 * XXX for now all we can do is EOI it.  We can't call do_wrongintr
273 * (yet) because we could be in a critical section.  The intended
274 * reporting path is left commented out below for that reason.
275#define WRONGINTR(irq_num,vec_name)					\
276	.text ;								\
277	SUPERALIGN_TEXT	 ;						\
278IDTVEC(vec_name) ;							\
279	PUSH_FRAME ;							\
280	movl	$0, lapic_eoi ;	/* End Of Interrupt to APIC */		\
281	/*pushl	$irq_num ;*/						\
282	/*call	do_wrongintr ;*/					\
283	/*addl	$4,%esp ;*/						\
284	POP_FRAME ;							\
285	iret  ;								\
286
287#endif
288
289/*
290 * Handle "spurious INTerrupts".
291 * Notes:
292 *  This is different from the "spurious INTerrupt" generated by an
293 *   8259 PIC for missing INTs.  See the APIC documentation for details.
294 *  This routine should NOT do an 'EOI' cycle.
295 */
296	.text
297	SUPERALIGN_TEXT
298	.globl Xspuriousint
299Xspuriousint:
300
301	/* No EOI cycle used here */
302
303	iret
304
305
306/*
307 * Handle TLB shootdowns.
 * Reloading %cr3 with its current value flushes the (non-global) TLB
 * entries, which is all this IPI needs to do.
308 */
309	.text
310	SUPERALIGN_TEXT
311	.globl	Xinvltlb
312Xinvltlb:
313	pushl	%eax
314
315	movl	%cr3, %eax		/* invalidate the TLB */
316	movl	%eax, %cr3
317
318	ss				/* stack segment, avoid %ds load */
319	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
320
321	popl	%eax
322	iret
323
324
325/*
326 * Executed by a CPU when it receives an Xcpustop IPI from another CPU.
327 *
328 *  - Signals its receipt.
329 *  - Waits for permission to restart.
330 *  - Processes pending IPIQ events while waiting.
331 *  - Signals its restart.
332 */
333
334	.text
335	SUPERALIGN_TEXT
336	.globl Xcpustop
337Xcpustop:
338	pushl	%ebp
339	movl	%esp, %ebp
340	pushl	%eax
341	pushl	%ecx
342	pushl	%edx
343	pushl	%ds			/* save current data segment */
344	pushl	%fs
345
346	movl	$KDSEL, %eax
347	mov	%ax, %ds		/* use KERNEL data segment */
348	movl	$KPSEL, %eax
349	mov	%ax, %fs
350
351	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
352
353	movl	PCPU(cpuid), %eax
354	imull	$PCB_SIZE, %eax
355	leal	CNAME(stoppcbs)(%eax), %eax
356	pushl	%eax
357	call	CNAME(savectx)		/* Save process context */
358	addl	$4, %esp
359
360
361	movl	PCPU(cpuid), %eax
362
363	/*
364	 * Indicate that we have stopped and loop waiting for permission
365	 * to start again.  We must still process IPI events while in a
366	 * stopped state.
367	 */
368	MPLOCKED
369	btsl	%eax, stopped_cpus	/* stopped_cpus |= (1<<id) */
3701:
371	andl	$~RQF_IPIQ,PCPU(reqflags)
372	pushl	%eax
373	call	lwkt_smp_stopped
374	popl	%eax
375	btl	%eax, started_cpus	/* while (!(started_cpus & (1<<id))) */
376	jnc	1b
377
378	MPLOCKED
379	btrl	%eax, started_cpus	/* started_cpus &= ~(1<<id) */
380	MPLOCKED
381	btrl	%eax, stopped_cpus	/* stopped_cpus &= ~(1<<id) */
382
383	test	%eax, %eax		/* only cpu 0 runs the restart hook */
384	jnz	2f
385
386	movl	CNAME(cpustop_restartfunc), %eax
387	test	%eax, %eax
388	jz	2f
389	movl	$0, CNAME(cpustop_restartfunc)	/* One-shot */
390
391	call	*%eax
3922:
393	popl	%fs
394	popl	%ds			/* restore previous data segment */
395	popl	%edx
396	popl	%ecx
397	popl	%eax
398	movl	%ebp, %esp
399	popl	%ebp
400	iret
401
402	/*
403	 * For now just have one ipiq IPI, but what we really want is
404	 * to have one for each source cpu so the APICs don't get stalled
405	 * backlogging the requests.
406	 */
407	.text
408	SUPERALIGN_TEXT
409	.globl Xipiq
410Xipiq:
411	PUSH_FRAME
412	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
413	FAKE_MCOUNT(15*4(%esp))
414
415	movl	PCPU(curthread),%ebx
416	cmpl	$TDPRI_CRIT,TD_PRI(%ebx)
417	jge	1f			/* in a crit section, just flag it */
418	subl	$8,%esp			/* make same as interrupt frame */
419	pushl	%esp			/* pass frame by reference */
420	incl	PCPU(intr_nesting_level)
421	addl	$TDPRI_CRIT,TD_PRI(%ebx)
422	call	lwkt_process_ipiq_frame
423	subl	$TDPRI_CRIT,TD_PRI(%ebx)
424	decl	PCPU(intr_nesting_level)
425	addl	$12,%esp
426	pushl	$0			/* CPL for frame (REMOVED) */
427	MEXITCOUNT
428	jmp	doreti
4291:
430	orl	$RQF_IPIQ,PCPU(reqflags)
431	MEXITCOUNT
432	POP_FRAME
433	iret
434
434
435#ifdef APIC_IO
436
437MCOUNT_LABEL(bintr)
	/* instantiate entry points for each of the 24 IO APIC pins (IRQ 0-23) */
438	FAST_INTR(0,apic_fastintr0)
439	FAST_INTR(1,apic_fastintr1)
440	FAST_INTR(2,apic_fastintr2)
441	FAST_INTR(3,apic_fastintr3)
442	FAST_INTR(4,apic_fastintr4)
443	FAST_INTR(5,apic_fastintr5)
444	FAST_INTR(6,apic_fastintr6)
445	FAST_INTR(7,apic_fastintr7)
446	FAST_INTR(8,apic_fastintr8)
447	FAST_INTR(9,apic_fastintr9)
448	FAST_INTR(10,apic_fastintr10)
449	FAST_INTR(11,apic_fastintr11)
450	FAST_INTR(12,apic_fastintr12)
451	FAST_INTR(13,apic_fastintr13)
452	FAST_INTR(14,apic_fastintr14)
453	FAST_INTR(15,apic_fastintr15)
454	FAST_INTR(16,apic_fastintr16)
455	FAST_INTR(17,apic_fastintr17)
456	FAST_INTR(18,apic_fastintr18)
457	FAST_INTR(19,apic_fastintr19)
458	FAST_INTR(20,apic_fastintr20)
459	FAST_INTR(21,apic_fastintr21)
460	FAST_INTR(22,apic_fastintr22)
461	FAST_INTR(23,apic_fastintr23)
462
463	/* YYY what is this garbage? */
464
465	SLOW_INTR(0,apic_slowintr0,)
466	SLOW_INTR(1,apic_slowintr1,)
467	SLOW_INTR(2,apic_slowintr2,)
468	SLOW_INTR(3,apic_slowintr3,)
469	SLOW_INTR(4,apic_slowintr4,)
470	SLOW_INTR(5,apic_slowintr5,)
471	SLOW_INTR(6,apic_slowintr6,)
472	SLOW_INTR(7,apic_slowintr7,)
473	SLOW_INTR(8,apic_slowintr8,)
474	SLOW_INTR(9,apic_slowintr9,)
475	SLOW_INTR(10,apic_slowintr10,)
476	SLOW_INTR(11,apic_slowintr11,)
477	SLOW_INTR(12,apic_slowintr12,)
478	SLOW_INTR(13,apic_slowintr13,)
479	SLOW_INTR(14,apic_slowintr14,)
480	SLOW_INTR(15,apic_slowintr15,)
481	SLOW_INTR(16,apic_slowintr16,)
482	SLOW_INTR(17,apic_slowintr17,)
483	SLOW_INTR(18,apic_slowintr18,)
484	SLOW_INTR(19,apic_slowintr19,)
485	SLOW_INTR(20,apic_slowintr20,)
486	SLOW_INTR(21,apic_slowintr21,)
487	SLOW_INTR(22,apic_slowintr22,)
488	SLOW_INTR(23,apic_slowintr23,)
489
490	WRONGINTR(0,apic_wrongintr0)
491	WRONGINTR(1,apic_wrongintr1)
492	WRONGINTR(2,apic_wrongintr2)
493	WRONGINTR(3,apic_wrongintr3)
494	WRONGINTR(4,apic_wrongintr4)
495	WRONGINTR(5,apic_wrongintr5)
496	WRONGINTR(6,apic_wrongintr6)
497	WRONGINTR(7,apic_wrongintr7)
498	WRONGINTR(8,apic_wrongintr8)
499	WRONGINTR(9,apic_wrongintr9)
500	WRONGINTR(10,apic_wrongintr10)
501	WRONGINTR(11,apic_wrongintr11)
502	WRONGINTR(12,apic_wrongintr12)
503	WRONGINTR(13,apic_wrongintr13)
504	WRONGINTR(14,apic_wrongintr14)
505	WRONGINTR(15,apic_wrongintr15)
506	WRONGINTR(16,apic_wrongintr16)
507	WRONGINTR(17,apic_wrongintr17)
508	WRONGINTR(18,apic_wrongintr18)
509	WRONGINTR(19,apic_wrongintr19)
510	WRONGINTR(20,apic_wrongintr20)
511	WRONGINTR(21,apic_wrongintr21)
512	WRONGINTR(22,apic_wrongintr22)
513	WRONGINTR(23,apic_wrongintr23)
514MCOUNT_LABEL(eintr)
515
516#endif
517
518	.data
519
520/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
521	.globl stopped_cpus, started_cpus
522stopped_cpus:
523	.long	0
524started_cpus:
525	.long	0
526
/* optional hook run once by cpu 0 on restart; cleared by Xcpustop */
527	.globl CNAME(cpustop_restartfunc)
528CNAME(cpustop_restartfunc):
529	.long 0
530
/* per-IRQ trigger-mode bitmask: bit set = level-triggered (see MASK_LEVEL_IRQ) */
531	.globl	apic_pin_trigger
532apic_pin_trigger:
533	.long	0
534
535	.text
536
537