/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ---
 *
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ipl.s
 *
 * $FreeBSD: src/sys/i386/isa/ipl.s,v 1.32.2.3 2002/05/16 16:03:56 bde Exp $
 */

#include <machine/asmacros.h>
#include <machine/segments.h>
#include <machine/ipl.h>
#include <machine/lock.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include "assym.s"

/*
 * AT/386
 * Vector interrupt control section
 *
 *  ipending	- Pending interrupts (set when a masked interrupt occurs)
 *  spending	- Pending software interrupts
 */
	.data
	ALIGN_DATA

	.globl		fastunpend_count
fastunpend_count:	.long	0

	.text
	SUPERALIGN_TEXT
	/*
	 * GENERAL NOTES
	 *
	 *	- interrupts are always called with a critical section held
	 *
	 *	- we release our critical section when scheduling interrupt
	 *	  or softinterrupt threads so that they can preempt
	 *	  (unless we are called manually from a critical section, in
	 *	  which case there will still be a critical section and
	 *	  they won't preempt anyway).
	 *
	 *	- TD_NEST_COUNT prevents splz from nesting too deeply within
	 *	  itself.  It is *not* actually an interrupt nesting count.
	 *	  PCPU(intr_nesting_level) is an interrupt nesting count.
	 *
	 *	- We have to be careful in regards to local interrupts
	 *	  occurring simultaneously with our doreti and splz
	 *	  processing.
	 *
	 *	- Interrupts must be enabled when calling higher level
	 *	  functions in order to avoid deadlocking against things
	 *	  like smp_invltlb.
	 */
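
	/*
	 * A hedged C sketch of the pattern described above (names are
	 * illustrative; "crit_count" stands in for TD_CRITCOUNT and the
	 * real interlock is the cli/sti dance in the code below):
	 *
	 *	td->crit_count++;		// force ints to pending
	 *	while (work_is_pending()) {	// hypothetical helper
	 *		td->crit_count--;	// let the scheduled ithread
	 *		dispatch_one();		// preempt us
	 *		td->crit_count++;
	 *	}
	 *	td->crit_count--;		// interlocked with cli
	 */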

	/*
	 * DORETI
	 *
	 * Handle return from interrupts, traps and syscalls.  This function
	 * checks the cpl for unmasked pending interrupts (hardware or soft)
	 * and schedules them if appropriate, then irets.
	 *
	 * If we are in a critical section we cannot run any pending ints.
	 *
	 * The stack contains a trapframe at the start of doreti.
	 */
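	/*
	 * Rough C outline of doreti's dispatch loop (a sketch only;
	 * "cpl_mask" is an illustrative name for the BGL-blocked IRQ
	 * mask carried in %rax):
	 *
	 *	if (reqflags == 0 || td->crit_count)
	 *		goto finish;			// just iretq
	 *	td->crit_count++;
	 *	for (;;) {
	 *		cli();
	 *		if (reqflags & RQF_IPIQ)  { ...; continue; }
	 *		if (reqflags & RQF_TIMER) { ...; continue; }
	 *		if (hard int pending & ~cpl_mask) { ...; continue; }
	 *		if (spending) { ...; continue; }
	 *		if ((reqflags & RQF_AST_MASK) && to_userland)
	 *			{ ...; continue; }
	 *		break;
	 *	}
	 *	td->crit_count--;			// interlocked w/cli
	 */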
	SUPERALIGN_TEXT
	.globl	doreti
	.type	doreti,@function
doreti:
	FAKE_MCOUNT(bintr)		/* init "from" bintr -> doreti */
	movq	$0,%rax			/* irq mask unavailable due to BGL */
	movq	PCPU(curthread),%rbx
	cli				/* interlock with critical section */
	cmpl	$0,PCPU(reqflags)	/* short cut if nothing to do */
	je	5f
	testl	$-1,TD_CRITCOUNT(%rbx)	/* can't unpend if in critical sec */
	jne	5f
	incl	TD_CRITCOUNT(%rbx)	/* force all ints to pending */
doreti_next:
	cli				/* re-assert cli on loop */
	movq	%rax,%rcx		/* irq mask unavailable due to BGL */
	notq	%rcx
	testl	$RQF_IPIQ,PCPU(reqflags)
	jnz	doreti_ipiq
	testl	$RQF_TIMER,PCPU(reqflags)
	jnz	doreti_timer
	/*
	 * check for an unmasked int (3 groups of 64 bits)
	 */
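	/*
	 * Equivalent C sketch of the three-group scan ("cpl_mask" again
	 * stands in for the blocked-IRQ mask in %rax; names illustrative):
	 *
	 *	for (group = 0; group < 3; ++group) {
	 *		if (ipending[group] & ~cpl_mask)
	 *			goto doreti_fast;  // intr = group*64 + bit
	 *	}
	 */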
	movq	$0,%rdx
	testq	PCPU_E8(ipending,%rdx),%rcx
	jnz	doreti_fast

	movq	$1,%rdx
	testq	PCPU_E8(ipending,%rdx),%rcx
	jnz	doreti_fast

	movq	$2,%rdx
	testq	PCPU_E8(ipending,%rdx),%rcx
	jnz	doreti_fast

	movl	PCPU(spending),%ecx	/* check for a pending software int */
	cmpl	$0,%ecx
	jnz	doreti_soft

	testl	$RQF_AST_MASK,PCPU(reqflags) /* any pending ASTs? */
	jz	2f

	/* ASTs are only applicable when returning to userland */
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jnz	doreti_ast
2:
	/*
	 * Nothing left to do, finish up.  Interrupts are still disabled.
	 * %eax contains the mask of IRQs that are not available due to
	 * BGL requirements.  We can only clear RQF_INTPEND if *ALL* pending
	 * interrupts have been processed.
	 */
	decl	TD_CRITCOUNT(%rbx)	/* interlocked with cli */
	testl	%eax,%eax
	jnz	5f
	andl	$~RQF_INTPEND,PCPU(reqflags)
5:
	MEXITCOUNT

	/*
	 * (interrupts are disabled here)
	 *
	 * Restore registers and iret.  iret can fault on %rip (which is
	 * really stupid).  If this occurs we re-fault and vector to
	 * doreti_iret_fault().
	 *
	 * ...
	 * can be set from user mode, this can result in a kernel mode
	 * exception.  The trap code will revector to the *_fault code
	 * which then sets up a T_PROTFLT signal.  If the signal is
	 * sent to userland, sendsig() will automatically clean up all
	 * the segment registers to avoid a loop.
	 */
	.globl	doreti_iret
	.globl	doreti_syscall_ret
doreti_syscall_ret:
	POP_FRAME()		/* registers and %gs (+cli) */
	/* WARNING: special global doreti_iret is also used by exception.S */
doreti_iret:
	iretq

	/*
	 * doreti_iret_fault.  Alternative return code for the case where
	 * we get a fault from iretq above.
	 *
	 * iretq -> IDT(prot) -> trap -> iretq -> doreti_iret_fault.
	 *
	 * The iretq above was trying to return to usermode and issued
	 * a KMMUEXIT, so it might be on the trampoline stack.  We must
	 * issue a KMMUENTER to ensure that we are on the correct stack
	 * and have the correct mmu context.
	 *
	 * Interrupts are likely disabled due to the above interlock
	 * between cli/iretq.  We must enable them before calling any
	 * high level function.
	 */
	ALIGN_TEXT
	.globl	doreti_iret_fault
doreti_iret_fault:
	KMMUENTER_TFRIP
	subq	$TF_RIP,%rsp
	PUSH_FRAME_REGS
	sti
	movq	$T_PROTFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_ERR(%rsp)	/* XXX should be the error code */
	movq	$0,TF_ADDR(%rsp)
	FAKE_MCOUNT(TF_RIP(%rsp))
	jmp	calltrap

	/*
	 * Interrupt pending.  NOTE: stack context holds frame structure
	 * for interrupt procedure, do not do random pushes or pops!
	 */
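	/*
	 * Roughly, in C (a sketch; the btrq carry flag below is the
	 * "still pending" test, because once sti re-enables interrupts
	 * another interrupt may clear the bit before we do):
	 *
	 *	mask = ipending[group] & ~cpl_mask;
	 *	sti();
	 *	bit = bsfq(mask);
	 *	if (!atomic_btr(&ipending[group], bit))	// illustrative
	 *		goto doreti_next;		// lost the race
	 *	dofastunpend(group * 64 + bit);
	 */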
	ALIGN_TEXT
doreti_fast:
	andq	PCPU_E8(ipending,%rdx),%rcx
	sti
	bsfq	%rcx, %rcx		/* locate the next dispatchable int */
	btrq	%rcx, PCPU_E8(ipending,%rdx)
					/* is it really still pending? */
	jnc	doreti_next

	shlq	$6, %rdx
	orq	%rdx, %rcx		/* form intr number */

	pushq	%rax			/* save IRQ mask unavailable for BGL */
					/* NOTE: is also CPL in frame */
	call	dofastunpend		/* unpend intr %rcx */
	popq	%rax
	jmp	doreti_next

	/*
	 *  SOFT interrupt pending
	 *
	 *  Temporarily back-out our critical section to allow an interrupt
	 *  to preempt us when we schedule it.  Bump TD_NEST_COUNT to
	 *  prevent the switch code from recursing via splz too deeply.
	 */
	ALIGN_TEXT
doreti_soft:
	sti
	bsfl	%ecx,%ecx		/* locate the next pending softint */
	btrl	%ecx,PCPU(spending)	/* make sure it's still pending */
	jnc	doreti_next
	addl	$FIRST_SOFTINT,%ecx	/* actual intr number */
	pushq	%rax
	movl	%ecx,%edi		/* argument to C call */
	incl	TD_NEST_COUNT(%rbx)	/* prevent doreti/splz nesting */
	decl	TD_CRITCOUNT(%rbx)	/* so we can preempt */
	call	sched_ithd_soft		/* YYY must pull in imasks */
	incl	TD_CRITCOUNT(%rbx)
	decl	TD_NEST_COUNT(%rbx)
	popq	%rax
	jmp	doreti_next

	/*
	 * AST pending.  We clear RQF_AST_SIGNAL automatically, the others
	 * are cleared by the trap as they are processed.
	 *
	 * Temporarily back-out our critical section because trap() can be
	 * a long-winded call, and we want to be more syscall-like.
	 *
	 * YYY theoretically we can call lwkt_switch directly if all we need
	 * to do is a reschedule.
	 */
doreti_ast:
	andl	$~RQF_AST_SIGNAL,PCPU(reqflags)
	sti
	movl	%eax,%r12d		/* save cpl (can't use stack) */
	movl	$T_ASTFLT,TF_TRAPNO(%rsp)
	movq	%rsp,%rdi		/* pass frame by ref (%rdi = C arg) */
	decl	TD_CRITCOUNT(%rbx)
	call	trap
	incl	TD_CRITCOUNT(%rbx)
	movl	%r12d,%eax		/* restore cpl for loop */
	jmp	doreti_next

	/*
	 * IPIQ message pending.  We clear RQF_IPIQ automatically.
	 */
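	/*
	 * The interlock below in rough C (a sketch; the xchgl is the
	 * atomic swap that re-arms the per-cpu npoll gate so another
	 * Xipi can be sent to us):
	 *
	 *	reqflags &= ~RQF_IPIQ;
	 *	atomic_swap_int(&npoll, 0);	// allow another Xipi
	 *	lwkt_process_ipiq_frame(frame);
	 */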
doreti_ipiq:
	movl	%eax,%r12d		/* save cpl (can't use stack) */
	incl	PCPU(intr_nesting_level)
	andl	$~RQF_IPIQ,PCPU(reqflags)
	subq	%rax,%rax
	sti
	xchgl	%eax,PCPU(npoll)	/* (atomic op) allow another Xipi */
	subq	$8,%rsp			/* trapframe->intrframe */
	movq	%rsp,%rdi		/* pass frame by ref (C arg) */
	call	lwkt_process_ipiq_frame
	addq	$8,%rsp			/* intrframe->trapframe */
	decl	PCPU(intr_nesting_level)
	movl	%r12d,%eax		/* restore cpl for loop */
	jmp	doreti_next

doreti_timer:
	movl	%eax,%r12d		/* save cpl (can't use stack) */
	incl	PCPU(intr_nesting_level)
	andl	$~RQF_TIMER,PCPU(reqflags)
	sti
	subq	$8,%rsp			/* trapframe->intrframe */
	movq	%rsp,%rdi		/* pass frame by ref (C arg) */
	call	pcpu_timer_process_frame
	addq	$8,%rsp			/* intrframe->trapframe */
	decl	PCPU(intr_nesting_level)
	movl	%r12d,%eax		/* restore cpl for loop */
	jmp	doreti_next

	/*
	 * SPLZ() - a C callable procedure to dispatch any unmasked pending
	 *	  interrupts regardless of critical section nesting.  ASTs
	 *	  are not dispatched.
	 *
	 * 	  Use %eax to track those IRQs that could not be processed
	 *	  due to BGL requirements.
	 */
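	/*
	 * From C this is simply "void splz(void)".  A hedged usage
	 * sketch (the real callers are the critical-section exit paths,
	 * which test reqflags before bothering to call in):
	 *
	 *	crit_exit();
	 *	if (mycpu->gd_reqflags)		// illustrative test
	 *		splz();
	 */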
	SUPERALIGN_TEXT

ENTRY(splz)
	pushfq
	pushq	%rbx
	movq	PCPU(curthread),%rbx
	incl	TD_CRITCOUNT(%rbx)
	movq	$0,%rax

splz_next:
	cli
	movq	%rax,%rcx		/* rcx = ~CPL */
	notq	%rcx
	testl	$RQF_IPIQ,PCPU(reqflags)
	jnz	splz_ipiq
	testl	$RQF_TIMER,PCPU(reqflags)
	jnz	splz_timer
	/*
	 * check for an unmasked int (3 groups of 64 bits, as in doreti)
	 */
	movq	$0,%rdx
	testq	PCPU_E8(ipending,%rdx),%rcx
	jnz	splz_fast

	movq	$1,%rdx
	testq	PCPU_E8(ipending,%rdx),%rcx
	jnz	splz_fast

	movq	$2,%rdx
	testq	PCPU_E8(ipending,%rdx),%rcx
	jnz	splz_fast

	movl	PCPU(spending),%ecx
	cmpl	$0,%ecx
	jnz	splz_soft

	decl	TD_CRITCOUNT(%rbx)

	/*
	 * Nothing left to do, finish up.  Interrupts are still disabled.
	 * If our mask of IRQs we couldn't process due to BGL requirements
	 * is 0 then there are no pending interrupt sources left and we
	 * can clear RQF_INTPEND.
	 */
	testl	%eax,%eax
	jnz	5f
	andl	$~RQF_INTPEND,PCPU(reqflags)
5:
	popq	%rbx
	popfq
	ret

	/*
	 * Interrupt pending
	 */
	ALIGN_TEXT
splz_fast:
	andq	PCPU_E8(ipending,%rdx),%rcx
	sti
	bsfq	%rcx, %rcx		/* locate the next dispatchable int */
	btrq	%rcx, PCPU_E8(ipending,%rdx)
					/* is it really still pending? */
	jnc	splz_next

	shlq	$6, %rdx
	orq	%rdx, %rcx		/* form intr number */

	pushq	%rax
	call	dofastunpend		/* unpend intr %rcx */
	popq	%rax
	jmp	splz_next

	/*
	 *  SOFT interrupt pending
	 *
	 *  Temporarily back-out our critical section to allow the interrupt
	 *  to preempt us.
	 */
	ALIGN_TEXT
splz_soft:
	sti
	bsfl	%ecx,%ecx		/* locate the next pending softint */
	btrl	%ecx,PCPU(spending)	/* make sure it's still pending */
	jnc	splz_next
	addl	$FIRST_SOFTINT,%ecx	/* actual intr number */
	sti
	pushq	%rax
	movl	%ecx,%edi		/* C argument */
	incl	TD_NEST_COUNT(%rbx)	/* prevent doreti/splz nesting */
	decl	TD_CRITCOUNT(%rbx)
	call	sched_ithd_soft		/* YYY must pull in imasks */
	incl	TD_CRITCOUNT(%rbx)
	decl	TD_NEST_COUNT(%rbx)	/* restore nest count */
	popq	%rax
	jmp	splz_next

splz_ipiq:
	andl	$~RQF_IPIQ,PCPU(reqflags)
	sti
	pushq	%rax
	subq	%rax,%rax
	xchgl	%eax,PCPU(npoll)	/* (atomic op) allow another Xipi */
	call	lwkt_process_ipiq
	popq	%rax
	jmp	splz_next

splz_timer:
	andl	$~RQF_TIMER,PCPU(reqflags)
	sti
	pushq	%rax
	call	pcpu_timer_process
	popq	%rax
	jmp	splz_next

	/*
	 * dofastunpend(%rcx:intr)
	 *
	 * An interrupt previously made pending can now be run.  Execute
	 * it by pushing a dummy interrupt frame and calling
	 * ithread_fast_handler to execute or schedule it.
	 *
	 * ithread_fast_handler() returns 0 if it wants us to unmask
	 * further interrupts.
	 */
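	/*
	 * Hedged C sketch of the sequence below (PUSH_DUMMY fakes the
	 * hardware portion of an interrupt frame; "intren" is the
	 * MachIntrABI method invoked indirectly at the end):
	 *
	 *	frame = push_dummy_intrframe(intr);	// illustrative
	 *	++fastunpend_count;
	 *	if (ithread_fast_handler(frame) == 0)
	 *		MachIntrABI.intren(intr);	// re-enable the int
	 */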
#define PUSH_DUMMY							\
	pushfq ;			/* phys int frame / flags */	\
	xorq	%rax,%rax ;		/* something not SEL_UPL */	\
	pushq	%rax ;			/* phys int frame / cs */	\
	pushq	3*8(%rsp) ;		/* original caller rip */	\
	subq	$TF_RIP,%rsp ;		/* trap frame */		\
	movq	$0,TF_XFLAGS(%rsp) ;	/* extras */			\
	movq	$0,TF_TRAPNO(%rsp) ;	/* extras */			\
	movq	$0,TF_ADDR(%rsp) ;	/* extras */			\
	movq	$0,TF_FLAGS(%rsp) ;	/* extras */			\
	movq	$0,TF_ERR(%rsp) ;	/* extras */			\

#define POP_DUMMY							\
	addq	$TF_RIP+(3*8),%rsp ;					\

dofastunpend:
	pushq	%rbp			/* frame for backtrace */
	movq	%rsp,%rbp
	PUSH_DUMMY
	pushq	%rcx			/* last part of intrframe = intr */
	incl	fastunpend_count
	movq	%rsp,%rdi		/* pass frame by reference C arg */
	call	ithread_fast_handler	/* returns 0 to unmask */
	popq	%rdi			/* intrframe->trapframe */
					/* + also rdi C arg to next call */
	cmpl	$0,%eax
	jnz	1f
	movq	MachIntrABI + MACHINTR_INTREN, %rax
	callq	*%rax			/* MachIntrABI.intren(intr) */
1:
	POP_DUMMY
	popq	%rbp
	ret
