/*	$NetBSD: spl.S,v 1.35 2009/11/27 03:23:10 rmind Exp $	*/

/*
 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>
__KERNEL_RCSID(0, "$NetBSD: spl.S,v 1.35 2009/11/27 03:23:10 rmind Exp $");

#include "opt_vm86.h"
#include "opt_ddb.h"
#include "opt_spldebug.h"
#include "opt_xen.h"

#include <machine/trap.h>
#include <machine/segments.h>
#include <machine/frameasm.h>

#include "assym.h"

	.text

#ifndef XEN
/*
 * int splraise(int s);
 */
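/*
 * In rough C terms (a sketch only, not the generated code), this is:
 *
 *	int splraise(int s) {
 *		int old = ci_ilevel;
 *		if (s > old)
 *			ci_ilevel = s;
 *		return old;
 *	}
 *
 * where ci_ilevel is the per-CPU field that CPUVAR(ILEVEL) refers to.
 */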
ENTRY(splraise)
	movl	4(%esp),%edx
	movl	CPUVAR(ILEVEL),%eax
	cmpl	%edx,%eax
	ja	1f
	movl	%edx,CPUVAR(ILEVEL)
1:
#ifdef SPLDEBUG
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%eax
	pushl	%edx
	call	_C_LABEL(spldebug_raise)
	addl	$4, %esp
	popl	%eax
	popl	%ebp
#endif /* SPLDEBUG */
	ret
END(splraise)

/*
 * void spllower(int s);
 *
 * spllower() for the i486 and Pentium.  Must be the same size as
 * cx8_spllower().  This must use pushf/cli/popf, as it is used
 * early in boot where interrupts are disabled via the eflags
 * interrupt-enable (IE) bit.
 */
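/*
 * In rough C terms (a sketch only), using the per-CPU fields that
 * CPUVAR(ILEVEL), CPUVAR(IPENDING) and CPUVAR(IUNMASK) refer to:
 *
 *	void spllower(int s) {
 *		if (s >= ci_ilevel)
 *			return;
 *		save eflags and disable interrupts;
 *		if (ci_ipending & ci_iunmask[s]) {
 *			restore eflags;
 *			Xspllower(s);	... run the deferred handlers
 *		} else {
 *			ci_ilevel = s;
 *			restore eflags;
 *		}
 *	}
 */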
ENTRY(spllower)
#ifdef SPLDEBUG
	movl	4(%esp), %ecx
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%ecx
	call	_C_LABEL(spldebug_lower)
	addl	$4, %esp
	popl	%ebp
#endif /* SPLDEBUG */
	movl	4(%esp), %ecx
	cmpl	CPUVAR(ILEVEL), %ecx
	jae	1f
	movl	CPUVAR(IUNMASK)(,%ecx,4), %edx
	pushf
	cli
	testl	CPUVAR(IPENDING), %edx
	jnz	2f
	movl	%ecx, CPUVAR(ILEVEL)
	popf
1:
	ret
2:
	popf
	jmp	_C_LABEL(Xspllower)
	.align	32
LABEL(spllower_end)
END(spllower)

/*
 * void	cx8_spllower(int s);
 *
 * spllower() optimized for the Pentium Pro and later, which have long
 * pipelines that would be stalled by pushf/cli/popf.  Must be the
 * same size as spllower().  Does not need to save and restore the
 * eflags interrupt-enable bit, since this routine is only patched in
 * once autoconf is under way, by which time interrupts are enabled.
 *
 * For cmpxchg8b, edx/ecx are the high words and eax/ebx the low.
 *
 * edx : eax = old level / old ipending
 * ecx : ebx = new level / old ipending
 */
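/*
 * The loop below is, in rough C terms (a sketch only), a 64-bit
 * compare-and-swap over the { ilevel, ipending } pair that
 * CPUVAR(ISTATE) names:
 *
 *	do {
 *		old = { ci_ilevel, ci_ipending };
 *		if (ci_ipending & ci_iunmask[s])
 *			return Xspllower(s);
 *		new = { s, ci_ipending };
 *	} while (cmpxchg8b of old -> new on the istate pair fails);
 *
 * so the level is only lowered if no interrupt became pending between
 * the test and the swap.
 */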
ENTRY(cx8_spllower)
	movl	4(%esp),%ecx
	movl	CPUVAR(ILEVEL),%edx
	cmpl	%edx,%ecx			/* new level is lower? */
	pushl	%ebx
	jae,pn	1f
0:
	movl	CPUVAR(IPENDING),%eax
	testl	%eax,CPUVAR(IUNMASK)(,%ecx,4)	/* deferred interrupts? */
	movl	%eax,%ebx
	jnz,pn	2f
	cmpxchg8b CPUVAR(ISTATE)		/* swap in new ilevel */
	jnz,pn	0b
1:
	popl	%ebx
	ret
2:
	popl	%ebx
LABEL(cx8_spllower_patch)
	jmp	_C_LABEL(Xspllower)
	.align	32
LABEL(cx8_spllower_end)
END(cx8_spllower)

#endif /* XEN */

/*
 * void Xspllower(int s);
 *
 * Process pending interrupts.
 *
 * Important registers:
 *   ebx - cpl
 *   esi - address to resume loop at
 *   edi - scratch for Xsoftnet
 *
 * It is important that the bit scan instruction is bsr: it picks up
 * the highest two bits (currently the IPI and clock handlers) first.
 * This avoids deadlocks where one CPU sends an IPI, another is at
 * splhigh() and defers it, lands in here via splx(), and first
 * handles a lower-priority interrupt that needs to take the kernel
 * lock; the sending CPU would then never see that CPU accept the IPI
 * (see pmap_tlb_shootnow).
 */
	nop	/* Don't get confused with cx8_spllower_end */

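/*
 * The dispatch loop below is, in rough outline (a sketch only):
 *
 *	for (;;) {
 *		disable interrupts;
 *		mask = ci_ipending & ci_iunmask[cpl];
 *		if (mask == 0)
 *			break;
 *		bit = bsr(mask);		... highest pending level
 *		clear bit in ci_ipending;
 *		jump to ci_isources[bit]->is_recurse, which runs the
 *		handler and comes back to the loop through %esi;
 *	}
 *	ci_ilevel = cpl;
 *	enable interrupts and return;
 */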
IDTVEC(spllower)
	pushl	%ebp
	movl	%esp,%ebp
	MCOUNT_ASM
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	movl	8(%ebp),%ebx
	movl	$.Lspllower_resume,%esi		# address to resume loop at
1:
# Because of the way Xen interrupts work, *%esi will in fact be called
# from Xdoreti via iret, so for Xen we always have to disable interrupts
# here.
#ifndef XEN
	CLI(%eax)
#endif
.Lspllower_resume:
#ifdef XEN
	CLI(%eax)
#endif
#if defined(DEBUG)
#ifndef XEN
	pushf
	popl	%eax
	testl	$PSL_I,%eax
	jnz	.Lspllower_panic
#else
	movl    CPUVAR(VCPU),%eax
	movb	EVTCHN_UPCALL_MASK(%eax), %al
	andb	%al, %al
	jz	.Lspllower_panic
#endif /* XEN */
#endif /* defined(DEBUG) */
	movl	%ebx,%eax		# get cpl
	movl	CPUVAR(IUNMASK)(,%eax,4),%eax
	andl	CPUVAR(IPENDING),%eax		# any non-masked bits left?
	jz	2f
	bsrl	%eax,%eax
	btrl	%eax,CPUVAR(IPENDING)
	movl	CPUVAR(ISOURCES)(,%eax,4),%eax
	jmp	*IS_RECURSE(%eax)
2:
	movl	%ebx,CPUVAR(ILEVEL)
#ifdef XEN
	STIC(%eax)
	jz 4f
	call	_C_LABEL(stipending)
	testl	%eax,%eax
	jnz	1b
4:
#else
	STI(%eax)
#endif /* XEN */
	popl	%edi
	popl	%esi
	popl	%ebx
	leave
	ret
#if defined(DEBUG)
.Lspllower_panic:
	addl $8, %esp
	pushl	$1f
	call	_C_LABEL(panic)
1:	.asciz	"SPLLOWER: INTERRUPT ENABLED"
#endif /* defined(DEBUG) */
IDTVEC_END(spllower)

/*
 * Handle return from interrupt after device handler finishes.
 *
 * Important registers:
 *   ebx - cpl to restore
 *   esi - address to resume loop at
 *   edi - scratch for Xsoftnet
 *
 * Called with interrupts disabled.
 */
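/*
 * In rough outline (a sketch only), the resume loop below is:
 *
 *	for (;;) {
 *		mask = ci_ipending & ci_iunmask[cpl];
 *		if (mask == 0)
 *			break;
 *		bit = bsr(mask);
 *		clear bit in ci_ipending;
 *		jump to ci_isources[bit]->is_resume;	... re-enters here
 *	}
 *	ci_ilevel = cpl;
 *	if (returning to user mode)
 *		handle ASTs and any deferred pmap_load();
 *	INTRFASTEXIT;				... iret to interrupted code
 */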
IDTVEC(doreti)
#ifndef XEN
	IDEPTH_DECR
	popl	%ebx			# get previous priority
#endif
.Ldoreti_resume_stic:
	movl	$.Ldoreti_resume,%esi	# address to resume loop at
.Ldoreti_resume:
#if defined(DEBUG)
#ifndef XEN
	pushf
	popl	%eax
	testl	$PSL_I,%eax
	jnz	.Ldoreti_panic
#else
	movl    CPUVAR(VCPU),%eax
	movb	EVTCHN_UPCALL_MASK(%eax), %al
	andb	%al, %al
	jz	.Ldoreti_panic
#endif /* XEN */
#endif /* defined(DEBUG) */
	movl	%ebx,%eax
	movl	CPUVAR(IUNMASK)(,%eax,4),%eax
	andl	CPUVAR(IPENDING),%eax
	jz	2f
	bsrl    %eax,%eax               # slow, but not worth optimizing
	btrl    %eax,CPUVAR(IPENDING)
	movl	CPUVAR(ISOURCES)(,%eax, 4),%eax
	jmp	*IS_RESUME(%eax)
2:	/* Check for ASTs on exit to user mode. */
	movl	%ebx,CPUVAR(ILEVEL)
5:
	testb   $CHK_UPL,TF_CS(%esp)
	jnz	doreti_checkast
#ifdef VM86
	testl	$PSL_VM,TF_EFLAGS(%esp)
	jz	6f
#else
	jmp	6f
#endif
	.globl doreti_checkast
doreti_checkast:
	CHECK_ASTPENDING(%eax)
	jz	3f
	CLEAR_ASTPENDING(%eax)
	STI(%eax)
	movl	$T_ASTFLT,TF_TRAPNO(%esp)	/* XXX undo later.. */
	/* Pushed T_ASTFLT into tf_trapno on entry. */
	pushl	%esp
	call	_C_LABEL(trap)
	addl	$4,%esp
	CLI(%eax)
	jmp	5b
3:
	CHECK_DEFERRED_SWITCH
	jnz	9f
6:
#ifdef XEN
	STIC(%eax)
	jz	4f
	call	_C_LABEL(stipending)
	testl   %eax,%eax
	jz 4f
	CLI(%eax)
	jmp	.Ldoreti_resume_stic
4:
#endif /* XEN */
	INTRFASTEXIT
9:
	STI(%eax)
	call	_C_LABEL(pmap_load)
	CLI(%eax)
	jmp	doreti_checkast	/* recheck ASTs */
#if defined(DEBUG)
.Ldoreti_panic:
	pushl	$1f
	call	_C_LABEL(panic)
1:	.asciz	"DORETI: INTERRUPT ENABLED"
#endif /* defined(DEBUG) */
IDTVEC_END(doreti)

#ifdef XEN
/*
 * void evtchn_do_event(int evtch, struct intrframe *regs)
 */

ENTRY(call_evtchn_do_event)
	IDEPTH_INCR
	/*
	 * IDEPTH_INCR leaves old %esp in %eax.
	 */
	pushl	8(%eax)	/* regs */
	pushl	4(%eax)	/* evtch */
	call	_C_LABEL(evtchn_do_event)
	addl	$8, %esp
	IDEPTH_DECR
	ret
END(call_evtchn_do_event)
#ifdef DOM0OPS
/*
 * void xenevt_event(int port)
 */

ENTRY(call_xenevt_event)
	IDEPTH_INCR
	/*
	 * IDEPTH_INCR leaves old %esp in %eax.
	 */
	pushl	4(%eax)	/* evtch */
	call	_C_LABEL(xenevt_event)
	addl	$4, %esp
	IDEPTH_DECR
	ret
END(call_xenevt_event)
#endif /* DOM0OPS */
#endif /* XEN */

#ifndef XEN
/*
 * Xsoftintr()
 *
 * Switch to the LWP assigned to handle interrupts from the given
 * source.  We borrow the VM context from the interrupted LWP.
 *
 * On entry:
 *
 *	%eax		intrsource
 *	%esi		address to return to
 */
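/*
 * In rough outline (a sketch only), the sequence below is:
 *
 *	build a struct switchframe on the interrupted LWP's stack;
 *	curlwp = the softint LWP for this interrupt source;
 *	save the interrupted LWP's %esp/%ebp in its PCB;
 *	switch %esp to the softint LWP's stack;
 *	softint_dispatch(interrupted_lwp, ipl);	... runs the handlers
 *	switch back to the interrupted LWP's stack and curlwp;
 *	unwind the switchframe and jump back to splx()/doreti via %esi;
 */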
IDTVEC(softintr)
	pushl	$_C_LABEL(softintr_ret)	/* set up struct switchframe */
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	movl	$IPL_HIGH,CPUVAR(ILEVEL)
	movl	CPUVAR(CURLWP),%esi
	movl	IS_LWP(%eax),%edi	/* switch to handler LWP */
	movl	%edi,CPUVAR(CURLWP)
	movl	L_PCB(%edi),%edx
	movl	L_PCB(%esi),%ecx
	movl	%esp,PCB_ESP(%ecx)
	movl	%ebp,PCB_EBP(%ecx)
	movl	PCB_ESP0(%edx),%esp	/* onto new stack */
	sti
	pushl	IS_MAXLEVEL(%eax)	/* ipl to run at */
	pushl	%esi
	call	_C_LABEL(softint_dispatch)	/* run handlers */
	addl	$8,%esp
	cli
	movl	L_PCB(%esi),%ecx
	movl	PCB_ESP(%ecx),%esp
	xchgl	%esi,CPUVAR(CURLWP)	/* must be globally visible */
	popl	%edi			/* unwind switchframe */
	popl	%esi
	addl	$8,%esp
	jmp	*%esi			/* back to splx/doreti */
IDTVEC_END(softintr)

/*
 * softintr_ret()
 *
 * Trampoline function that gets returned to by cpu_switchto() when
 * an interrupt handler blocks.  On entry:
 *
 *	%eax		prevlwp from cpu_switchto()
 */
ENTRY(softintr_ret)
	incl	CPUVAR(MTX_COUNT)	/* re-adjust after mi_switch */
	movl	$0, L_CTXSWTCH(%eax)	/* %eax from cpu_switchto */
	cli
	jmp	*%esi			/* back to splx/doreti */
END(softintr_ret)

/*
 * void softint_trigger(uintptr_t machdep);
 *
 * Trigger a software interrupt by marking it pending on the local CPU.
 */
ENTRY(softint_trigger)
	movl	4(%esp),%eax
	orl	%eax,CPUVAR(IPENDING)	/* atomic on local cpu */
	ret
END(softint_trigger)

/*
 * Xpreemptrecurse()
 *
 * Handles preemption interrupts via Xspllower().
 */
IDTVEC(preemptrecurse)
	movl	$IPL_PREEMPT, CPUVAR(ILEVEL)
	sti
	pushl	$0
	call	_C_LABEL(kpreempt)
	addl	$4, %esp
	cli
	jmp	*%esi
IDTVEC_END(preemptrecurse)

/*
 * Xpreemptresume()
 *
 * Handles preemption interrupts via Xdoreti().
 */
IDTVEC(preemptresume)
	movl	$IPL_PREEMPT, CPUVAR(ILEVEL)
	sti
	testb   $CHK_UPL, TF_CS(%esp)
	jnz	1f
#ifdef VM86
	testl	$PSL_VM,TF_EFLAGS(%esp)
	jnz	1f
#endif
	movl	TF_EIP(%esp), %eax
	pushl	%eax
	call	_C_LABEL(kpreempt)		# from kernel
	addl	$4, %esp
	cli
	jmp	*%esi
1:
	call	_C_LABEL(preempt)		# from user
	cli
	jmp	*%esi
IDTVEC_END(preemptresume)
#endif /* !XEN */
