/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ---
 *
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ipl.s
 *
 * $FreeBSD: src/sys/i386/isa/ipl.s,v 1.32.2.3 2002/05/16 16:03:56 bde Exp $
 */

#include <machine/asmacros.h>
#include <machine/segments.h>
#include <machine/ipl.h>
#include <machine/lock.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include "assym.s"

/*
 * AT/386
 * Vector interrupt control section
 *
 *  ipending	- Pending interrupts (set when a masked interrupt occurs)
 *  spending	- Pending software interrupts
 */
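
/*
 * A rough C-style view of the per-cpu pending state tested below
 * (a sketch only; the real fields live in the per-cpu globaldata
 * structure and these declarations are illustrative):
 *
 *	uint64_t ipending[3];	// three 64-bit groups = 192 hw IRQ bits
 *	uint32_t spending;	// softint bits, counted from FIRST_SOFTINT
 */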
	.data
	ALIGN_DATA

	.globl		fastunpend_count
fastunpend_count:	.long	0

	.text
	SUPERALIGN_TEXT

	/*
	 * GENERAL NOTES
	 *
	 *	- Interrupts are always called with a critical section held.
	 *
	 *	- We release our critical section when scheduling interrupt
	 *	  or softinterrupt threads so that they can preempt
	 *	  (unless we are called manually from a critical section, in
	 *	  which case there will still be a critical section and
	 *	  they won't preempt anyway).
	 *
	 *	- TD_NEST_COUNT prevents splz from nesting too deeply within
	 *	  itself.  It is *not* actually an interrupt nesting count.
	 *	  PCPU(intr_nesting_level) is an interrupt nesting count.
	 *
	 *	- We have to be careful with regard to local interrupts
	 *	  occurring simultaneously with our doreti and splz
	 *	  processing.
	 *
	 *	- Interrupts must be enabled when calling higher level
	 *	  functions in order to avoid deadlocking against things
	 *	  like smp_invltlb.
	 */
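
	/*
	 * The scheduling pattern described above, as a C-like sketch
	 * (field names assumed from the TD_* offsets used below, not
	 * the actual implementation):
	 *
	 *	td->td_nest_count++;	// bound splz re-entry; NOT an
	 *				// interrupt nesting count
	 *	td->td_critcount--;	// leave the critical section so
	 *				// the scheduled thread can preempt
	 *	sched_ithd_soft(intr);	// may preempt right here
	 *	td->td_critcount++;
	 *	td->td_nest_count--;
	 */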

	/*
	 * DORETI
	 *
	 * Handle return from interrupts, traps and syscalls.  This function
	 * checks the cpl for unmasked pending interrupts (hardware or soft)
	 * and schedules them if appropriate, then irets.
	 *
	 * If we are in a critical section we cannot run any pending ints.
	 *
	 * The stack contains a trapframe at the start of doreti.
	 */
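
	/*
	 * As a C-like sketch, the dispatch loop below does roughly the
	 * following (illustrative only; names approximate the PCPU
	 * fields used in the assembly):
	 *
	 *	if (gd->gd_reqflags == 0 || td->td_critcount != 0)
	 *		iretq();			// nothing runnable now
	 *	td->td_critcount++;
	 *	for (;;) {
	 *		cli();
	 *		if (reqflags & RQF_IPIQ)    { ipiq();    continue; }
	 *		if (reqflags & RQF_TIMER)   { timer();   continue; }
	 *		if (ipending[g] & ~cpl)     { unpend();  continue; }
	 *		if (spending)               { softint(); continue; }
	 *		if ((reqflags & RQF_AST_MASK) && to_user)
	 *		                            { ast();     continue; }
	 *		break;
	 *	}
	 *	td->td_critcount--;
	 *	iretq();
	 */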
	SUPERALIGN_TEXT
	.globl	doreti
	.type	doreti,@function
doreti:
	FAKE_MCOUNT(bintr)		/* init "from" bintr -> doreti */
	movq	$0,%rax			/* irq mask unavailable due to BGL */
	movq	PCPU(curthread),%rbx
	cli				/* interlock with critical section */
	cmpl	$0,PCPU(reqflags)	/* short cut if nothing to do */
	je	5f
	testl	$-1,TD_CRITCOUNT(%rbx)	/* can't unpend if in critical sec */
	jne	5f
	incl	TD_CRITCOUNT(%rbx)	/* force all ints to pending */
doreti_next:
	cli				/* re-assert cli on loop */
	movq	%rax,%rcx		/* irq mask unavailable due to BGL */
	notq	%rcx
	testl	$RQF_IPIQ,PCPU(reqflags)
	jnz	doreti_ipiq
	testl	$RQF_TIMER,PCPU(reqflags)
	jnz	doreti_timer
	/*
	 * check for an unmasked int (3 groups)
	 */
	movq	$0,%rdx
	testq	PCPU_E8(ipending,%rdx),%rcx
	jnz	doreti_fast

	movq	$1,%rdx
	testq	PCPU_E8(ipending,%rdx),%rcx
	jnz	doreti_fast

	movq	$2,%rdx
	testq	PCPU_E8(ipending,%rdx),%rcx
	jnz	doreti_fast

	movl	PCPU(spending),%ecx	/* check for a pending software int */
	cmpl	$0,%ecx
	jnz	doreti_soft

	testl	$RQF_AST_MASK,PCPU(reqflags) /* any pending ASTs? */
	jz	2f

	/* ASTs are only applicable when returning to userland */
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jnz	doreti_ast
2:
	/*
	 * Nothing left to do, finish up.  Interrupts are still disabled.
	 * %eax contains the mask of IRQ's that are not available due to
	 * BGL requirements.  We can only clear RQF_INTPEND if *ALL* pending
	 * interrupts have been processed.
	 */
	decl	TD_CRITCOUNT(%rbx)	/* interlocked with cli */
	testl	%eax,%eax
	jnz	5f
	andl	$~RQF_INTPEND,PCPU(reqflags)
5:
	MEXITCOUNT

	/*
	 * (interrupts are disabled here)
	 *
	 * Restore registers and iret.  iret can fault on %rip (which is
	 * really stupid).  If this occurs we re-fault and vector to
	 * doreti_iret_fault().
	 *
	 * ...
	 * can be set from user mode, this can result in a kernel mode
	 * exception.  The trap code will revector to the *_fault code
	 * which then sets up a T_PROTFLT signal.  If the signal is
	 * sent to userland, sendsig() will automatically clean up all
	 * the segment registers to avoid a loop.
	 */
	.globl	doreti_iret
	.globl	doreti_syscall_ret
doreti_syscall_ret:
	POP_FRAME		/* registers and %gs (+cli) */
	/* WARNING: special global doreti_iret is also used by exception.S */
doreti_iret:
	iretq

	/*
	 * doreti_iret_fault.  Alternative return code for the case where
	 * we get a fault in the doreti_exit code above.  trap()
	 * (sys/platform/pc64/x86_64/trap.c) catches this specific case,
	 * sends the process a signal and continues in the corresponding
	 * place in the code below.
	 *
	 * Interrupts are likely disabled due to the above interlock
	 * between cli/iretq.  We must enable them before calling any
	 * high level function.
	 */
	ALIGN_TEXT
	.globl	doreti_iret_fault
doreti_iret_fault:
	PUSH_FRAME_NOSWAP
	sti
	movq	$T_PROTFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_ERR(%rsp)	/* XXX should be the error code */
	movq	$0,TF_ADDR(%rsp)
	FAKE_MCOUNT(TF_RIP(%rsp))
	jmp	calltrap
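
	/*
	 * Conceptually, the trap()-side recovery looks something like
	 * this (a loose sketch; the exact check lives in
	 * sys/platform/pc64/x86_64/trap.c):
	 *
	 *	if (frame->tf_rip == (register_t)doreti_iret) {
	 *		// the iretq itself faulted; resume at the
	 *		// alternate entry instead of looping forever
	 *		frame->tf_rip = (register_t)doreti_iret_fault;
	 *	}
	 */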

	/*
	 * Interrupt pending.  NOTE: stack context holds frame structure
	 * for interrupt procedure, do not do random pushes or pops!
	 */
	ALIGN_TEXT
doreti_fast:
	andq	PCPU_E8(ipending,%rdx),%rcx
	sti
	bsfq	%rcx, %rcx		/* locate the next dispatchable int */
	btrq	%rcx, PCPU_E8(ipending,%rdx)
					/* is it really still pending? */
	jnc	doreti_next

	shlq	$6, %rdx
	orq	%rdx, %rcx		/* form intr number */

	pushq	%rax			/* save IRQ mask unavailable for BGL */
					/* NOTE: is also CPL in frame */
	call	dofastunpend		/* unpend intr %rcx */
	popq	%rax
	jmp	doreti_next
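
	/*
	 * In C terms the dispatch above is roughly (a sketch; btr
	 * stands in for the btrq test-and-clear):
	 *
	 *	pending = ipending[group] & ~cpl_mask;
	 *	bit = bsf(pending);		// lowest dispatchable bit
	 *	if (btr(&ipending[group], bit))	// still pending?  claim it
	 *		dofastunpend((group << 6) | bit);	// intr 0..191
	 */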

	/*
	 *  SOFT interrupt pending
	 *
	 *  Temporarily back out our critical section to allow an interrupt
	 *  to preempt us when we schedule it.  Bump TD_NEST_COUNT to
	 *  prevent the switch code from recursing via splz too deeply.
	 */
	ALIGN_TEXT
doreti_soft:
	sti
	bsfl	%ecx,%ecx		/* locate the next pending softint */
	btrl	%ecx,PCPU(spending)	/* make sure it's still pending */
	jnc	doreti_next
	addl	$FIRST_SOFTINT,%ecx	/* actual intr number */
	pushq	%rax
	movl	%ecx,%edi		/* argument to C call */
	incl	TD_NEST_COUNT(%rbx)	/* prevent doreti/splz nesting */
	decl	TD_CRITCOUNT(%rbx)	/* so we can preempt */
	call	sched_ithd_soft		/* YYY must pull in imasks */
	incl	TD_CRITCOUNT(%rbx)
	decl	TD_NEST_COUNT(%rbx)
	popq	%rax
	jmp	doreti_next
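
	/*
	 * Equivalent C-style sketch of the softint scan above
	 * (illustrative; btr stands in for the btrl test-and-clear):
	 *
	 *	bit = bsf(gd->gd_spending);	// lowest pending softint
	 *	if (btr(&gd->gd_spending, bit))	// still set?  claim it
	 *		sched_ithd_soft(FIRST_SOFTINT + bit);
	 */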

	/*
	 * AST pending.  We clear RQF_AST_SIGNAL automatically, the others
	 * are cleared by the trap as they are processed.
	 *
	 * Temporarily back out our critical section because trap() can be
	 * a long-winded call, and we want to be more syscall-like.
	 *
	 * YYY theoretically we can call lwkt_switch directly if all we need
	 * to do is a reschedule.
	 */
doreti_ast:
	andl	$~RQF_AST_SIGNAL,PCPU(reqflags)
	sti
	movl	%eax,%r12d		/* save cpl (can't use stack) */
	movl	$T_ASTFLT,TF_TRAPNO(%rsp)
	movq	%rsp,%rdi		/* pass frame by ref (%rdi = C arg) */
	decl	TD_CRITCOUNT(%rbx)
	call	trap
	incl	TD_CRITCOUNT(%rbx)
	movl	%r12d,%eax		/* restore cpl for loop */
	jmp	doreti_next
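
	/*
	 * C-style sketch of the AST path above (illustrative only):
	 *
	 *	gd->gd_reqflags &= ~RQF_AST_SIGNAL;
	 *	frame->tf_trapno = T_ASTFLT;
	 *	td->td_critcount--;	// trap() can be long-winded
	 *	trap(frame);		// clears remaining AST flags
	 *	td->td_critcount++;
	 */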

	/*
	 * IPIQ message pending.  We clear RQF_IPIQ automatically.
	 */
doreti_ipiq:
	movl	%eax,%r12d		/* save cpl (can't use stack) */
	incl	PCPU(intr_nesting_level)
	andl	$~RQF_IPIQ,PCPU(reqflags)
	subq	%rax,%rax
	sti
	xchgl	%eax,PCPU(npoll)	/* (atomic op) allow another Xipi */
	subq	$8,%rsp			/* trapframe->intrframe */
	movq	%rsp,%rdi		/* pass frame by ref (C arg) */
	call	lwkt_process_ipiq_frame
	addq	$8,%rsp			/* intrframe->trapframe */
	decl	PCPU(intr_nesting_level)
	movl	%r12d,%eax		/* restore cpl for loop */
	jmp	doreti_next
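
	/*
	 * The xchg above is an atomic swap; in C terms, roughly
	 * (illustrative names; the timer path below is analogous):
	 *
	 *	gd->gd_reqflags &= ~RQF_IPIQ;
	 *	atomic_swap_int(&gd->gd_npoll, 0);	// allow a fresh Xipi
	 *	lwkt_process_ipiq_frame(frame);		// drain the queue
	 *
	 * Zeroing npoll before draining lets another cpu re-IPI us for
	 * messages that arrive while we are processing the queue.
	 */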

doreti_timer:
	movl	%eax,%r12d		/* save cpl (can't use stack) */
	incl	PCPU(intr_nesting_level)
	andl	$~RQF_TIMER,PCPU(reqflags)
	sti
	subq	$8,%rsp			/* trapframe->intrframe */
	movq	%rsp,%rdi		/* pass frame by ref (C arg) */
	call	pcpu_timer_process_frame
	addq	$8,%rsp			/* intrframe->trapframe */
	decl	PCPU(intr_nesting_level)
	movl	%r12d,%eax		/* restore cpl for loop */
	jmp	doreti_next

	/*
	 * SPLZ() is a C callable procedure to dispatch any unmasked
	 *	  pending interrupts regardless of critical section
	 *	  nesting.  ASTs are not dispatched.
	 *
	 *	  Uses %eax to track those IRQs that could not be
	 *	  processed due to BGL requirements.
	 */
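
	/*
	 * Typical C-side usage is simply (an illustrative sketch, not a
	 * specific call site):
	 *
	 *	if (gd->gd_reqflags)	// anything pending?
	 *		splz();		// dispatch it now
	 *
	 * splz() establishes its own critical section internally and
	 * preserves the caller's interrupt flag via pushfq/popfq.
	 */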
	SUPERALIGN_TEXT

ENTRY(splz)
	pushfq
	pushq	%rbx
	movq	PCPU(curthread),%rbx
	incl	TD_CRITCOUNT(%rbx)
	movq	$0,%rax

splz_next:
	cli
	movq	%rax,%rcx		/* rcx = ~CPL */
	notq	%rcx
	testl	$RQF_IPIQ,PCPU(reqflags)
	jnz	splz_ipiq
	testl	$RQF_TIMER,PCPU(reqflags)
	jnz	splz_timer
	/*
	 * check for an unmasked int (3 groups)
	 */
	movq	$0,%rdx
	testq	PCPU_E8(ipending,%rdx),%rcx
	jnz	splz_fast

	movq	$1,%rdx
	testq	PCPU_E8(ipending,%rdx),%rcx
	jnz	splz_fast

	movq	$2,%rdx
	testq	PCPU_E8(ipending,%rdx),%rcx
	jnz	splz_fast

	movl	PCPU(spending),%ecx
	cmpl	$0,%ecx
	jnz	splz_soft

	decl	TD_CRITCOUNT(%rbx)

	/*
	 * Nothing left to do, finish up.  Interrupts are still disabled.
	 * If our mask of IRQs we couldn't process due to BGL requirements
	 * is 0 then there are no pending interrupt sources left and we
	 * can clear RQF_INTPEND.
	 */
	testl	%eax,%eax
	jnz	5f
	andl	$~RQF_INTPEND,PCPU(reqflags)
5:
	popq	%rbx
	popfq
	ret

	/*
	 * Interrupt pending
	 */
	ALIGN_TEXT
splz_fast:
	andq	PCPU_E8(ipending,%rdx),%rcx
	sti
	bsfq	%rcx, %rcx		/* locate the next dispatchable int */
	btrq	%rcx, PCPU_E8(ipending,%rdx)
					/* is it really still pending? */
	jnc	splz_next

	shlq	$6, %rdx
	orq	%rdx, %rcx		/* form intr number */

	pushq	%rax
	call	dofastunpend		/* unpend intr %rcx */
	popq	%rax
	jmp	splz_next

	/*
	 *  SOFT interrupt pending
	 *
	 *  Temporarily back out our critical section to allow the interrupt
	 *  to preempt us.
	 */
	ALIGN_TEXT
splz_soft:
	sti
	bsfl	%ecx,%ecx		/* locate the next pending softint */
	btrl	%ecx,PCPU(spending)	/* make sure it's still pending */
	jnc	splz_next
	addl	$FIRST_SOFTINT,%ecx	/* actual intr number */
	sti
	pushq	%rax
	movl	%ecx,%edi		/* C argument */
	incl	TD_NEST_COUNT(%rbx)	/* prevent doreti/splz nesting */
	decl	TD_CRITCOUNT(%rbx)
	call	sched_ithd_soft		/* YYY must pull in imasks */
	incl	TD_CRITCOUNT(%rbx)
	decl	TD_NEST_COUNT(%rbx)	/* restore nest count */
	popq	%rax
	jmp	splz_next

splz_ipiq:
	andl	$~RQF_IPIQ,PCPU(reqflags)
	sti
	pushq	%rax
	subq	%rax,%rax
	xchgl	%eax,PCPU(npoll)	/* (atomic op) allow another Xipi */
	call	lwkt_process_ipiq
	popq	%rax
	jmp	splz_next

splz_timer:
	andl	$~RQF_TIMER,PCPU(reqflags)
	sti
	pushq	%rax
	call	pcpu_timer_process
	popq	%rax
	jmp	splz_next

	/*
	 * dofastunpend(%rcx:intr)
	 *
	 * An interrupt previously made pending can now be run.  Execute
	 * it by pushing a dummy interrupt frame and calling
	 * ithread_fast_handler to run or schedule it.
	 *
	 * ithread_fast_handler() returns 0 if it wants us to unmask
	 * further interrupts.
	 */
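
	/*
	 * C-style sketch of the sequence below (push_dummy_intrframe and
	 * pop_dummy_intrframe are stand-ins for the PUSH_DUMMY/POP_DUMMY
	 * macros):
	 *
	 *	push_dummy_intrframe(intr);
	 *	if (ithread_fast_handler(frame) == 0)
	 *		MachIntrABI.intren(intr);	// unmask the source
	 *	pop_dummy_intrframe();
	 */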
#define PUSH_DUMMY							\
	pushfq ;			/* phys int frame / flags */	\
	xorq	%rax,%rax ;		/* something not SEL_UPL */	\
	pushq	%rax ;			/* phys int frame / cs */	\
	pushq	3*8(%rsp) ;		/* original caller rip */	\
	subq	$TF_RIP,%rsp ;		/* trap frame */		\
	movq	$0,TF_XFLAGS(%rsp) ;	/* extras */			\
	movq	$0,TF_TRAPNO(%rsp) ;	/* extras */			\
	movq	$0,TF_ADDR(%rsp) ;	/* extras */			\
	movq	$0,TF_FLAGS(%rsp) ;	/* extras */			\
	movq	$0,TF_ERR(%rsp) ;	/* extras */			\

#define POP_DUMMY							\
	addq	$TF_RIP+(3*8),%rsp ;					\

dofastunpend:
	pushq	%rbp			/* frame for backtrace */
	movq	%rsp,%rbp
	PUSH_DUMMY
	pushq	%rcx			/* last part of intrframe = intr */
	incl	fastunpend_count
	movq	%rsp,%rdi		/* pass frame by reference C arg */
	call	ithread_fast_handler	/* returns 0 to unmask */
	popq	%rdi			/* intrframe->trapframe */
					/* + also rdi C arg to next call */
	cmpl	$0,%eax
	jnz	1f
	movq	MachIntrABI + MACHINTR_INTREN, %rax
	callq	*%rax			/* MachIntrABI.intren(intr) */
1:
	POP_DUMMY
	popq	%rbp
	ret