xref: /dragonfly/sys/platform/pc64/apic/apic_vector.s (revision e0ecab34)
/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 * $DragonFly: src/sys/platform/pc32/apic/apic_vector.s,v 1.39 2008/08/02 01:14:43 dillon Exp $
 */

#if 0
#include "use_npx.h"
#include "opt_auto_eoi.h"
#endif

#include <machine/asmacros.h>
#include <machine/lock.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/segments.h>

#include <machine_base/icu/icu.h>
#include <bus/isa/isa.h>

#include "assym.s"

#include "apicreg.h"
#include "apic_ipl.h"
#include <machine/smp.h>
#include <machine_base/isa/intr_machdep.h>

/* convert an absolute IRQ# into a bitmask */
#define IRQ_LBIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

#ifdef SMP
#define MPLOCKED     lock ;
#else
#define MPLOCKED
#endif

#define APIC_PUSH_FRAME							\
	PUSH_FRAME ;		/* 15 regs + space for 5 extras */	\
	movq $0,TF_XFLAGS(%rsp) ;					\
	movq $0,TF_TRAPNO(%rsp) ;					\
	movq $0,TF_ADDR(%rsp) ;						\
	movq $0,TF_FLAGS(%rsp) ;					\
	movq $0,TF_ERR(%rsp) ;						\
	cld ;								\

/*
 * JG stale? Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * have to use doreti.
 */
#define APIC_POP_FRAME POP_FRAME

/* sizeof(struct apic_intmapinfo) == 24 */
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 24 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 24 * (irq_num) + 16

#define MASK_IRQ(irq_num)						\
	APIC_IMASK_LOCK ;			/* into critical reg */	\
	testl	$IRQ_LBIT(irq_num), apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_LBIT(irq_num), apic_imen ;	/* set the mask bit */	\
	movq	IOAPICADDR(irq_num), %rcx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%rcx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%rcx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%rcx) ;	/* new value */		\
7: ;						/* already masked */	\
	APIC_IMASK_UNLOCK ;						\

/*
 * Test to see whether we are handling an edge or level triggered INT.
 *  Level-triggered INTs must still be masked as we don't clear the source,
 *  and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_LBIT(irq_num), apic_pin_trigger ;			\
	jz	9f ;				/* edge, don't mask */	\
	MASK_IRQ(irq_num) ;						\
9: ;									\

/*
 * Test to see if the source is currently masked, clear the mask if so.
 * %eax holds the handler's return value; a non-zero value means the
 * interrupt must be left masked.
 */
#define UNMASK_IRQ(irq_num)					\
	cmpl	$0,%eax ;						\
	jnz	8f ;							\
	APIC_IMASK_LOCK ;			/* into critical reg */	\
	testl	$IRQ_LBIT(irq_num), apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_LBIT(irq_num), apic_imen ;/* clear mask bit */	\
	movq	IOAPICADDR(irq_num),%rcx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax,(%rcx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%rcx),%eax ;	/* current value */	\
	andl	$~IOART_INTMASK,%eax ;		/* clear the mask */	\
	movl	%eax,IOAPIC_WINDOW(%rcx) ;	/* new value */		\
7: ;									\
	APIC_IMASK_UNLOCK ;						\
8: ;									\

#ifdef APIC_IO

/*
 * Fast interrupt call handlers run in the following sequence:
 *
 *	- Push the trap frame required by doreti
 *	- Mask the interrupt and reenable its source
 *	- If we cannot take the interrupt, set its fpending bit and
 *	  doreti.  Note that we cannot mess with mp_lock at all
 *	  if we entered from a critical section!
 *	- If we can take the interrupt, clear its fpending bit,
 *	  call the handler, then unmask and doreti.
 *
 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
 */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	APIC_PUSH_FRAME ;						\
	FAKE_MCOUNT(15*4(%esp)) ;					\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movq	lapic, %rax ;						\
	movl	$0, LA_EOI(%rax) ;					\
	movq	PCPU(curthread),%rbx ;					\
	testl	$-1,TD_NEST_COUNT(%rbx) ;				\
	jne	1f ;							\
	cmpl	$TDPRI_CRIT,TD_PRI(%rbx) ;				\
	jl	2f ;							\
1: ;									\
	/* in critical section, make interrupt pending */		\
	/* set the pending bit and return, leave interrupt masked */	\
	orl	$IRQ_LBIT(irq_num),PCPU(fpending) ;			\
	orl	$RQF_INTPEND,PCPU(reqflags) ;				\
	jmp	5f ;							\
2: ;									\
	/* clear pending bit, run handler */				\
	andl	$~IRQ_LBIT(irq_num),PCPU(fpending) ;			\
	pushq	$irq_num ;		/* trapframe -> intrframe */	\
	movq	%rsp, %rdi ;		/* pass frame by reference */	\
	addl	$TDPRI_CRIT,TD_PRI(%rbx) ;				\
	call	ithread_fast_handler ;	/* returns 0 to unmask */	\
	subl	$TDPRI_CRIT,TD_PRI(%rbx) ;				\
	addq	$8, %rsp ;		/* intrframe -> trapframe */	\
	UNMASK_IRQ(irq_num) ;						\
5: ;									\
	MEXITCOUNT ;							\
	jmp	doreti ;						\

#endif

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *   8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xspuriousint
Xspuriousint:

	/* No EOI cycle used here */

	iretq


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvltlb
Xinvltlb:
	pushq	%rax

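	/* reloading %cr3 below flushes all non-global TLB entries */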
	movq	%cr3, %rax		/* invalidate the TLB */
	movq	%rax, %cr3

	movq	lapic, %rax
	movl	$0, LA_EOI(%rax)	/* End Of Interrupt to APIC */

	popq	%rax
	iretq


/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU.
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Processes pending IPIQ events while waiting.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl Xcpustop
Xcpustop:
	pushq	%rbp
	movq	%rsp, %rbp
	/* We save registers that are not preserved across function calls. */
	/* JG can be re-written with mov's */
	pushq	%rax
	pushq	%rcx
	pushq	%rdx
	pushq	%rsi
	pushq	%rdi
	pushq	%r8
	pushq	%r9
	pushq	%r10
	pushq	%r11

#if JG
	/* JGXXX switch to kernel %gs? */
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs
#endif

	movq	lapic, %rax
	movl	$0, LA_EOI(%rax)	/* End Of Interrupt to APIC */

	/* JG */
	movl	PCPU(cpuid), %eax
	imull	$PCB_SIZE, %eax
	leaq	CNAME(stoppcbs), %rdi
	addq	%rax, %rdi
	call	CNAME(savectx)		/* Save process context */


	movl	PCPU(cpuid), %eax

	/*
	 * Indicate that we have stopped and loop waiting for permission
	 * to start again.  We must still process IPI events while in a
	 * stopped state.
	 */
	MPLOCKED
	btsl	%eax, stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	andl	$~RQF_IPIQ,PCPU(reqflags)
	pushq	%rax
	call	lwkt_smp_stopped
	popq	%rax
	btl	%eax, started_cpus	/* while (!(started_cpus & (1<<id))) */
	jnc	1b

	MPLOCKED
	btrl	%eax, started_cpus	/* started_cpus &= ~(1<<id) */
	MPLOCKED
	btrl	%eax, stopped_cpus	/* stopped_cpus &= ~(1<<id) */

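	/* only cpu 0 checks for and runs the restart function */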
	test	%eax, %eax
	jnz	2f

	movq	CNAME(cpustop_restartfunc), %rax
	test	%rax, %rax
	jz	2f
	movq	$0, CNAME(cpustop_restartfunc)	/* One-shot */

	call	*%rax
2:
	popq	%r11
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rdi
	popq	%rsi
	popq	%rdx
	popq	%rcx
	popq	%rax

#if JG
	popl	%fs
	popl	%ds			/* restore previous data segment */
#endif
	movq	%rbp, %rsp
	popq	%rbp
	iretq

	/*
	 * For now just have one ipiq IPI, but what we really want is
	 * to have one for each source cpu so the APICs don't get stalled
	 * backlogging the requests.
	 */
	.text
	SUPERALIGN_TEXT
	.globl Xipiq
Xipiq:
	APIC_PUSH_FRAME
	movq	lapic, %rax
	movl	$0, LA_EOI(%rax)	/* End Of Interrupt to APIC */
	FAKE_MCOUNT(15*4(%esp))

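	/* process the queue immediately unless we are in a critical section */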
	incl    PCPU(cnt) + V_IPI
	movq	PCPU(curthread),%rbx
	cmpl	$TDPRI_CRIT,TD_PRI(%rbx)
	jge	1f
	subq	$8,%rsp			/* make same as interrupt frame */
	movq	%rsp,%rdi		/* pass frame by reference */
	incl	PCPU(intr_nesting_level)
	addl	$TDPRI_CRIT,TD_PRI(%rbx)
	call	lwkt_process_ipiq_frame
	subl	$TDPRI_CRIT,TD_PRI(%rbx)
	decl	PCPU(intr_nesting_level)
	addq	$8,%rsp			/* turn into trapframe */
	MEXITCOUNT
	jmp	doreti
1:
	orl	$RQF_IPIQ,PCPU(reqflags)
	MEXITCOUNT
	APIC_POP_FRAME
	iretq

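/*
 * Handle LAPIC timer interrupts.  If the current thread is not in a
 * critical section and is not nested we process the timer directly via
 * lapic_timer_process_frame(), otherwise RQF_TIMER is set and the
 * request is left pending.
 */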
	.text
	SUPERALIGN_TEXT
	.globl Xtimer
Xtimer:
	APIC_PUSH_FRAME
	movq	lapic, %rax
	movl	$0, LA_EOI(%rax)	/* End Of Interrupt to APIC */
	FAKE_MCOUNT(15*4(%esp))

	incl    PCPU(cnt) + V_TIMER
	movq	PCPU(curthread),%rbx
	cmpl	$TDPRI_CRIT,TD_PRI(%rbx)
	jge	1f
	testl	$-1,TD_NEST_COUNT(%rbx)
	jne	1f
	subq	$8,%rsp			/* make same as interrupt frame */
	movq	%rsp,%rdi		/* pass frame by reference */
	incl	PCPU(intr_nesting_level)
	addl	$TDPRI_CRIT,TD_PRI(%rbx)
	call	lapic_timer_process_frame
	subl	$TDPRI_CRIT,TD_PRI(%rbx)
	decl	PCPU(intr_nesting_level)
	addq	$8,%rsp			/* turn into trapframe */
	MEXITCOUNT
	jmp	doreti
1:
	orl	$RQF_TIMER,PCPU(reqflags)
	MEXITCOUNT
	APIC_POP_FRAME
	iretq

#ifdef APIC_IO

MCOUNT_LABEL(bintr)
	FAST_INTR(0,apic_fastintr0)
	FAST_INTR(1,apic_fastintr1)
	FAST_INTR(2,apic_fastintr2)
	FAST_INTR(3,apic_fastintr3)
	FAST_INTR(4,apic_fastintr4)
	FAST_INTR(5,apic_fastintr5)
	FAST_INTR(6,apic_fastintr6)
	FAST_INTR(7,apic_fastintr7)
	FAST_INTR(8,apic_fastintr8)
	FAST_INTR(9,apic_fastintr9)
	FAST_INTR(10,apic_fastintr10)
	FAST_INTR(11,apic_fastintr11)
	FAST_INTR(12,apic_fastintr12)
	FAST_INTR(13,apic_fastintr13)
	FAST_INTR(14,apic_fastintr14)
	FAST_INTR(15,apic_fastintr15)
	FAST_INTR(16,apic_fastintr16)
	FAST_INTR(17,apic_fastintr17)
	FAST_INTR(18,apic_fastintr18)
	FAST_INTR(19,apic_fastintr19)
	FAST_INTR(20,apic_fastintr20)
	FAST_INTR(21,apic_fastintr21)
	FAST_INTR(22,apic_fastintr22)
	FAST_INTR(23,apic_fastintr23)
MCOUNT_LABEL(eintr)

#endif

	.data

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl stopped_cpus, started_cpus
stopped_cpus:
	.long	0
started_cpus:
	.long	0

	.globl CNAME(cpustop_restartfunc)
CNAME(cpustop_restartfunc):
	.quad 0

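/* bitmask of level-triggered IRQs (see MASK_LEVEL_IRQ) */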
	.globl	apic_pin_trigger
apic_pin_trigger:
	.long	0

	.text