/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ---
 *
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ipl.s
 *
 * $FreeBSD: src/sys/i386/isa/ipl.s,v 1.32.2.3 2002/05/16 16:03:56 bde Exp $
 */

#include <machine/asmacros.h>
#include <machine/segments.h>
#include <machine/ipl.h>
#include <machine/lock.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include "assym.s"

/*
 * AT/386
 * Vector interrupt control section
 *
 *  ipending	- Pending hardware interrupts (set when a masked interrupt
 *		  occurs)
 *  spending	- Pending software interrupts
 */
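/*
 * Note: ipending is scanned below as three 64-bit words per cpu, so a
 * hardware interrupt number decomposes as intr = (word << 6) | bit; see
 * the shlq $6 / orq sequences in doreti_fast and splz_fast.
 */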
	.data
	ALIGN_DATA

	.globl		fastunpend_count
fastunpend_count:	.long	0

	.text
	SUPERALIGN_TEXT

	/*
	 * GENERAL NOTES
	 *
	 *	- interrupts are always called with a critical section held
	 *
	 *	- we release our critical section when scheduling interrupt
	 *	  or softinterrupt threads so that they can preempt
	 *	  (unless we are called manually from a critical section, in
	 *	  which case there will still be a critical section and
	 *	  they won't preempt anyway).
	 *
	 *	- TD_NEST_COUNT prevents splz from nesting too deeply within
	 *	  itself.  It is *not* actually an interrupt nesting count.
	 *	  PCPU(intr_nesting_level) is an interrupt nesting count.
	 *
	 *	- We have to be careful in regards to local interrupts
	 *	  occurring simultaneously with our doreti and splz
	 *	  processing.
	 *
	 *	- Interrupts must be enabled when calling higher level
	 *	  functions in order to avoid deadlocking against things
	 *	  like smp_invltlb.
	 */
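
	/*
	 * The dispatch loops below share a common interlock pattern:
	 * disable interrupts (cli), test PCPU(reqflags) and the pending
	 * masks, and re-enable interrupts (sti) only while actually
	 * dispatching work, re-asserting cli at the top of each loop
	 * iteration.
	 */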

	/*
	 * DORETI
	 *
	 * Handle return from interrupts, traps and syscalls.  This function
	 * checks the cpl for unmasked pending interrupts (hardware or soft)
	 * and schedules them if appropriate, then irets.
	 *
	 * If we are in a critical section we cannot run any pending ints.
	 *
	 * The stack contains a trapframe at the start of doreti.
	 */
	SUPERALIGN_TEXT
	.globl	doreti
	.type	doreti,@function
doreti:
	FAKE_MCOUNT(bintr)		/* init "from" bintr -> doreti */
	movq	$0,%rax			/* irq mask unavailable due to BGL */
	movq	PCPU(curthread),%rbx
	cli				/* interlock with critical section */
	cmpl	$0,PCPU(reqflags)	/* short cut if nothing to do */
	je	5f
	testl	$-1,TD_CRITCOUNT(%rbx)	/* can't unpend if in critical sec */
	jne	5f
	incl	TD_CRITCOUNT(%rbx)	/* force all ints to pending */
doreti_next:
	cli				/* re-assert cli on loop */
	movq	%rax,%rcx		/* irq mask unavailable due to BGL */
	notq	%rcx
	testl	$RQF_IPIQ,PCPU(reqflags)
	jnz	doreti_ipiq
	testl	$RQF_TIMER,PCPU(reqflags)
	jnz	doreti_timer
	/*
	 * check for an unmasked int (3 groups)
	 */
	movq	$0,%rdx
	testq	PCPU_E8(ipending,%rdx),%rcx
	jnz	doreti_fast

	movq	$1,%rdx
	testq	PCPU_E8(ipending,%rdx),%rcx
	jnz	doreti_fast

	movq	$2,%rdx
	testq	PCPU_E8(ipending,%rdx),%rcx
	jnz	doreti_fast

	movl	PCPU(spending),%ecx	/* check for a pending software int */
	cmpl	$0,%ecx
	jnz	doreti_soft

	testl	$RQF_AST_MASK,PCPU(reqflags) /* any pending ASTs? */
	jz	2f

	/* ASTs are only applicable when returning to userland */
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jnz	doreti_ast
2:
	/*
	 * Nothing left to do, finish up.  Interrupts are still disabled.
	 * %eax contains the mask of IRQ's that are not available due to
	 * BGL requirements.  We can only clear RQF_INTPEND if *ALL* pending
	 * interrupts have been processed.
	 */
	decl	TD_CRITCOUNT(%rbx)	/* interlocked with cli */
	testl	%eax,%eax
	jnz	5f
	andl	$~RQF_INTPEND,PCPU(reqflags)
5:
	MEXITCOUNT

	/*
	 * (interrupts are disabled here)
	 *
	 * Restore registers and iret.  iret can fault on %rip (which is
	 * really stupid).  If this occurs we re-fault and vector to
	 * doreti_iret_fault().
	 *
	 * Since the segment registers can be set from user mode, this can
	 * result in a kernel mode exception.  The trap code will revector
	 * to the *_fault code which then sets up a T_PROTFLT signal.  If
	 * the signal is sent to userland, sendsig() will automatically
	 * clean up all the segment registers to avoid a loop.
	 */
	.globl	doreti_iret
	.globl	doreti_syscall_ret
doreti_syscall_ret:
	POP_FRAME		/* registers and %gs (+cli) */
	/* WARNING: special global doreti_iret is also used by exception.S */
doreti_iret:
	iretq

	/*
	 * doreti_iret_fault.  Alternative return code for the case where
	 * we get a fault in the doreti_exit code above.  trap()
	 * (sys/platform/pc64/x86_64/trap.c) catches this specific case,
	 * sends the process a signal and continues in the corresponding
	 * place in the code below.
	 *
	 * Interrupts are likely disabled due to the above interlock
	 * between cli/iretq.  We must enable them before calling any
	 * high level function.
	 */
	ALIGN_TEXT
	.globl	doreti_iret_fault
doreti_iret_fault:
	PUSH_FRAME_NOSWAP
	sti
	movq	$T_PROTFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_ERR(%rsp)	/* XXX should be the error code */
	movq	$0,TF_ADDR(%rsp)
	FAKE_MCOUNT(TF_RIP(%rsp))
	jmp	calltrap

	/*
	 * Interrupt pending.  NOTE: stack context holds frame structure
	 * for interrupt procedure, do not do random pushes or pops!
	 */
	ALIGN_TEXT
doreti_fast:
	andq	PCPU_E8(ipending,%rdx),%rcx
	sti
	bsfq	%rcx, %rcx		/* locate the next dispatchable int */
	btrq	%rcx, PCPU_E8(ipending,%rdx)
					/* is it really still pending? */
	jnc	doreti_next

	shlq	$6, %rdx
	orq	%rdx, %rcx		/* form intr number */

	pushq	%rax			/* save IRQ mask unavailable for BGL */
					/* NOTE: is also CPL in frame */
	call	dofastunpend		/* unpend intr %rcx */
	popq	%rax
	jmp	doreti_next

	/*
	 *  SOFT interrupt pending
	 *
	 *  Temporarily back out our critical section to allow the interrupt
	 *  to preempt us when we schedule it.  Bump TD_NEST_COUNT to
	 *  prevent the switch code from recursing via splz too deeply.
	 */
	ALIGN_TEXT
doreti_soft:
	sti
	bsfl	%ecx,%ecx		/* locate the next pending softint */
	btrl	%ecx,PCPU(spending)	/* make sure it's still pending */
	jnc	doreti_next
	addl	$FIRST_SOFTINT,%ecx	/* actual intr number */
	pushq	%rax
	movl	%ecx,%edi		/* argument to C call */
	incl	TD_NEST_COUNT(%rbx)	/* prevent doreti/splz nesting */
	decl	TD_CRITCOUNT(%rbx)	/* so we can preempt */
	call	sched_ithd_soft		/* YYY must pull in imasks */
	incl	TD_CRITCOUNT(%rbx)
	decl	TD_NEST_COUNT(%rbx)
	popq	%rax
	jmp	doreti_next

	/*
	 * AST pending.  We clear RQF_AST_SIGNAL automatically; the others
	 * are cleared by the trap as they are processed.
	 *
	 * Temporarily back out our critical section because trap() can be
	 * a long-winded call, and we want to be more syscall-like.
	 *
	 * YYY theoretically we can call lwkt_switch directly if all we need
	 * to do is a reschedule.
	 */
doreti_ast:
	andl	$~RQF_AST_SIGNAL,PCPU(reqflags)
	sti
	movl	%eax,%r12d		/* save cpl (can't use stack) */
	movl	$T_ASTFLT,TF_TRAPNO(%rsp)
	movq	%rsp,%rdi		/* pass frame by ref (%rdi = C arg) */
	decl	TD_CRITCOUNT(%rbx)
	call	trap
	incl	TD_CRITCOUNT(%rbx)
	movl	%r12d,%eax		/* restore cpl for loop */
	jmp	doreti_next

	/*
	 * IPIQ message pending.  We clear RQF_IPIQ automatically.
	 */
doreti_ipiq:
	movl	%eax,%r12d		/* save cpl (can't use stack) */
	incl	PCPU(intr_nesting_level)
	andl	$~RQF_IPIQ,PCPU(reqflags)
	sti
	subq	$8,%rsp			/* trapframe->intrframe */
	movq	%rsp,%rdi		/* pass frame by ref (C arg) */
	call	lwkt_process_ipiq_frame
	addq	$8,%rsp			/* intrframe->trapframe */
	decl	PCPU(intr_nesting_level)
	movl	%r12d,%eax		/* restore cpl for loop */
	jmp	doreti_next

doreti_timer:
	movl	%eax,%r12d		/* save cpl (can't use stack) */
	incl	PCPU(intr_nesting_level)
	andl	$~RQF_TIMER,PCPU(reqflags)
	sti
	subq	$8,%rsp			/* trapframe->intrframe */
	movq	%rsp,%rdi		/* pass frame by ref (C arg) */
	call	lapic_timer_process_frame
	addq	$8,%rsp			/* intrframe->trapframe */
	decl	PCPU(intr_nesting_level)
	movl	%r12d,%eax		/* restore cpl for loop */
	jmp	doreti_next
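
	/*
	 * Note on the subq/addq $8 above: the called C code expects a
	 * struct intrframe, which appears to carry one extra leading word
	 * relative to a trapframe, so reserving 8 bytes below the
	 * trapframe lines the fields up.  This reading is inferred from
	 * the trapframe->intrframe annotations, not verified against the
	 * struct definitions.
	 */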

	/*
	 * SPLZ() is a C-callable procedure that dispatches any unmasked
	 *	  pending interrupts regardless of critical section nesting.
	 *	  ASTs are not dispatched.
	 *
	 * 	  Use %eax to track those IRQs that could not be processed
	 *	  due to BGL requirements.
	 */
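
	/*
	 * Illustrative C-level call pattern (a sketch only; the exact
	 * field names, e.g. gd_reqflags, are assumptions following the
	 * conventions used above):
	 *
	 *	if (gd->gd_reqflags & RQF_INTPEND)
	 *		splz();		(run deferred interrupts)
	 *
	 * splz saves the caller's rflags with pushfq and restores them
	 * with popfq on return, so the caller's interrupt-enable state
	 * is preserved even though the loop toggles cli/sti internally.
	 */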
	SUPERALIGN_TEXT

ENTRY(splz)
	pushfq
	pushq	%rbx
	movq	PCPU(curthread),%rbx
	incl	TD_CRITCOUNT(%rbx)
	movq	$0,%rax

splz_next:
	cli
	movq	%rax,%rcx		/* rcx = ~CPL */
	notq	%rcx
	testl	$RQF_IPIQ,PCPU(reqflags)
	jnz	splz_ipiq
	testl	$RQF_TIMER,PCPU(reqflags)
	jnz	splz_timer
	/*
	 * check for an unmasked int (3 groups)
	 */
	movq	$0,%rdx
	testq	PCPU_E8(ipending,%rdx),%rcx
	jnz	splz_fast

	movq	$1,%rdx
	testq	PCPU_E8(ipending,%rdx),%rcx
	jnz	splz_fast

	movq	$2,%rdx
	testq	PCPU_E8(ipending,%rdx),%rcx
	jnz	splz_fast

	movl	PCPU(spending),%ecx
	cmpl	$0,%ecx
	jnz	splz_soft

	decl	TD_CRITCOUNT(%rbx)

	/*
	 * Nothing left to do, finish up.  Interrupts are still disabled.
	 * If our mask of IRQs we couldn't process due to BGL requirements
	 * is 0 then there are no pending interrupt sources left and we
	 * can clear RQF_INTPEND.
	 */
	testl	%eax,%eax
	jnz	5f
	andl	$~RQF_INTPEND,PCPU(reqflags)
5:
	popq	%rbx
	popfq
	ret

	/*
	 * Interrupt pending
	 */
	ALIGN_TEXT
splz_fast:
	andq	PCPU_E8(ipending,%rdx),%rcx
	sti
	bsfq	%rcx, %rcx		/* locate the next dispatchable int */
	btrq	%rcx, PCPU_E8(ipending,%rdx)
					/* is it really still pending? */
	jnc	splz_next

	shlq	$6, %rdx
	orq	%rdx, %rcx		/* form intr number */

	pushq	%rax
	call	dofastunpend		/* unpend intr %rcx */
	popq	%rax
	jmp	splz_next

	/*
	 *  SOFT interrupt pending
	 *
	 *  Temporarily back out our critical section to allow the interrupt
	 *  to preempt us.
	 */
	ALIGN_TEXT
splz_soft:
	sti
	bsfl	%ecx,%ecx		/* locate the next pending softint */
	btrl	%ecx,PCPU(spending)	/* make sure it's still pending */
	jnc	splz_next
	addl	$FIRST_SOFTINT,%ecx	/* actual intr number */
	sti
	pushq	%rax
	movl	%ecx,%edi		/* C argument */
	incl	TD_NEST_COUNT(%rbx)	/* prevent doreti/splz nesting */
	decl	TD_CRITCOUNT(%rbx)
	call	sched_ithd_soft		/* YYY must pull in imasks */
	incl	TD_CRITCOUNT(%rbx)
	decl	TD_NEST_COUNT(%rbx)	/* undo the nesting bump */
	popq	%rax
	jmp	splz_next

splz_ipiq:
	andl	$~RQF_IPIQ,PCPU(reqflags)
	sti
	pushq	%rax
	call	lwkt_process_ipiq
	popq	%rax
	jmp	splz_next

splz_timer:
	andl	$~RQF_TIMER,PCPU(reqflags)
	sti
	pushq	%rax
	call	lapic_timer_process
	popq	%rax
	jmp	splz_next

	/*
	 * dofastunpend(%rcx:intr)
	 *
	 * An interrupt previously made pending can now be run.  Execute it
	 * by pushing a dummy interrupt frame and calling
	 * ithread_fast_handler to execute or schedule it.
	 *
	 * ithread_fast_handler() returns 0 if it wants us to unmask
	 * further interrupts.
	 */
#define PUSH_DUMMY							\
	pushfq ;			/* phys int frame / flags */	\
	xorq	%rax,%rax ;		/* something not SEL_UPL */	\
	pushq	%rax ;			/* phys int frame / cs */	\
	pushq	3*8(%rsp) ;		/* original caller rip */	\
	subq	$TF_RIP,%rsp ;		/* trap frame */		\
	movq	$0,TF_XFLAGS(%rsp) ;	/* extras */			\
	movq	$0,TF_TRAPNO(%rsp) ;	/* extras */			\
	movq	$0,TF_ADDR(%rsp) ;	/* extras */			\
	movq	$0,TF_FLAGS(%rsp) ;	/* extras */			\
	movq	$0,TF_ERR(%rsp) ;	/* extras */			\

#define POP_DUMMY							\
	addq	$TF_RIP+(3*8),%rsp ;					\

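	/*
	 * Stack picture after PUSH_DUMMY, as built by the pushes above
	 * (dofastunpend has already pushed %rbp), assuming the standard
	 * trapframe layout from assym.s with rip/cs/rflags at the top:
	 *
	 *	caller %rip, saved %rbp		(prologue, above the frame)
	 *	rflags				-> TF_RFLAGS
	 *	0				-> TF_CS (kernel, not SEL_UPL)
	 *	caller %rip			-> TF_RIP
	 *	TF_RIP bytes of frame		-> %rsp
	 *
	 * POP_DUMMY unwinds the TF_RIP-sized frame plus the three words
	 * pushed for rflags/cs/rip, hence TF_RIP+(3*8).
	 */
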
dofastunpend:
	pushq	%rbp			/* frame for backtrace */
	movq	%rsp,%rbp
	PUSH_DUMMY
	pushq	%rcx			/* last part of intrframe = intr */
	incl	fastunpend_count
	movq	%rsp,%rdi		/* pass frame by reference C arg */
	call	ithread_fast_handler	/* returns 0 to unmask */
	popq	%rdi			/* intrframe->trapframe */
					/* + also rdi C arg to next call */
	cmpl	$0,%eax
	jnz	1f
	movq	MachIntrABI + MACHINTR_INTREN, %rax
	callq	*%rax			/* MachIntrABI.intren(intr) */
1:
	POP_DUMMY
	popq	%rbp
	ret