/*	$NetBSD: spl.S,v 1.58 2023/03/01 08:38:50 riastradh Exp $	*/

/*
 * Copyright (c) 1998, 2007, 2008, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>
__KERNEL_RCSID(0, "$NetBSD: spl.S,v 1.58 2023/03/01 08:38:50 riastradh Exp $");

#include "opt_ddb.h"
#include "opt_spldebug.h"
#include "opt_xen.h"

#include <machine/trap.h>
#include <machine/segments.h>
#include <machine/frameasm.h>

#include "assym.h"

	.text

/*
 * int splraise(int s);
 */
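/*
 * Illustrative C-level sketch of the fast path below.  This is a sketch
 * only: curcpu() and the ci_ilevel field stand in for the real per-CPU
 * state declared in the machine headers, not the actual layout.
 *
 *	int
 *	splraise(int s)
 *	{
 *		int old = curcpu()->ci_ilevel;
 *
 *		if (s > old)
 *			curcpu()->ci_ilevel = s;
 *		return old;
 *	}
 *
 * The level is only ever raised here; any interrupts deferred while it
 * is raised are processed later by spllower()/splx().
 */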
ENTRY(splraise)
	movl	4(%esp),%edx
	movzbl	CPUVAR(ILEVEL),%eax
	cmpl	%edx,%eax
	ja	1f
	movb	%dl,CPUVAR(ILEVEL)
1:
#ifdef SPLDEBUG
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%eax
	pushl	%edx
	call	_C_LABEL(spldebug_raise)
	addl	$4,%esp
	popl	%eax
	popl	%ebp
#endif /* SPLDEBUG */
	ret
END(splraise)

#ifndef XENPV
/*
 * void spllower(int s);
 *
 * spllower() for i486 and Pentium. Must be the same size as cx8_spllower(),
 * that is, 96 bytes. This must use pushf/cli/popf as it is used early in boot
 * where interrupts are disabled via eflags/IE.
 */
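/*
 * Rough C equivalent of the logic below (a sketch only; ci_ilevel,
 * ci_iunmask[] and ci_ipending are illustrative names for the per-CPU
 * fields, and the 64-bit pending mask is shown as a single value):
 *
 *	void
 *	spllower(int s)
 *	{
 *		if (s >= curcpu()->ci_ilevel)
 *			return;
 *		disable_interrupts();			// pushf/cli
 *		if (curcpu()->ci_ipending & curcpu()->ci_iunmask[s]) {
 *			Xspllower(s);			// run deferred handlers
 *		} else {
 *			curcpu()->ci_ilevel = s;
 *			restore_interrupts();		// popf
 *		}
 *	}
 */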
ENTRY(spllower)
	HOTPATCH(HP_NAME_SPLLOWER, 96)
#ifdef SPLDEBUG
	movl	4(%esp),%ecx
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%ecx
	call	_C_LABEL(spldebug_lower)
	addl	$4,%esp
	popl	%ebp
#endif /* SPLDEBUG */
	movl	4(%esp),%ecx
	cmpb	CPUVAR(ILEVEL),%cl
	jae	1f
	movl	CPUVAR(IUNMASK)(,%ecx,8),%edx
	movl	CPUVAR(IUNMASK)+4(,%ecx,8),%eax
	PUSHF(%eax)
	CLI(%eax)
	testl	CPUVAR(IPENDING),%edx
	jnz	2f
	testl	CPUVAR(IPENDING)+4,%eax
	jnz	2f
	movb	%cl,CPUVAR(ILEVEL)
	POPF(%eax)
1:
	ret
2:
	popf
	jmp	_C_LABEL(Xspllower)
	.align	32
END(spllower)
#else  /* XENPV */
STRONG_ALIAS(spllower, cx8_spllower)
#endif /* !XENPV */

/*
 * void	cx8_spllower(int s);
 *
 * spllower() optimized for Pentium Pro and later, which have long pipelines
 * that will be stalled by pushf/cli/popf.  Must be the same size as
 * spllower(), i.e. 96 bytes.  Does not need to restore eflags/IE as it is
 * patched in once autoconf is underway.
 *
 * For cmpxchg8b, edx/ecx are the high words and eax/ebx the low.
 *
 * edx : eax = old level + high 24 bit old ipending / low 32 bit old ipending
 * ecx : ebx = new level + high 24 bit old ipending / low 32 bit old ipending
 */
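/*
 * Conceptually (a hedged sketch, not the real data structures): the
 * interrupt level and the pending bits live together in one 64-bit
 * word, CPUVAR(ISTATE), so the level can be lowered with a single
 * cmpxchg8b instead of toggling the interrupt flag.  The IPENDING(),
 * PENDING_MASK and atomic_cas_64 names below are purely illustrative:
 *
 *	for (;;) {
 *		uint64_t old = curcpu()->ci_istate;
 *		if (IPENDING(old) & curcpu()->ci_iunmask[s])
 *			return Xspllower(s);	// deferred work to do
 *		uint64_t new = (old & PENDING_MASK) | ((uint64_t)s << 56);
 *		if (atomic_cas_64(&curcpu()->ci_istate, old, new) == old)
 *			return;			// lowered, nothing pending
 *	}
 */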
ENTRY(cx8_spllower)
	movl	4(%esp),%ecx
	movzbl	CPUVAR(ILEVEL),%edx
	cmpl	%edx,%ecx			/* new level is lower? */
	jae	1f
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	movl	%ecx,%esi
	movl	%ecx,%edi
	shll	$24,%edi
0:
	movl	CPUVAR(IPENDING),%eax
	testl	%eax,CPUVAR(IUNMASK)(,%esi,8)	/* deferred interrupts? */
	jnz	2f
	movl	CPUVAR(IPENDING)+4,%edx
	testl	%edx,CPUVAR(IUNMASK)+4(,%esi,8)
	jnz	2f
	movl	%eax,%ebx
	movl	%edx,%ecx
	andl	$0x00ffffff,%ecx
	orl	%edi,%ecx
	cmpxchg8b CPUVAR(ISTATE)		/* swap in new ilevel */
	jnz	0b
	popl	%edi
	popl	%esi
	popl	%ebx
1:
	ret
2:
	popl	%edi
	popl	%esi
	popl	%ebx

	/* The reference must be absolute, hence the indirect jump. */
	movl	$Xspllower,%eax
	jmp	*%eax

	.align	32, 0xCC
LABEL(cx8_spllower_end)
END(cx8_spllower)

/*
 * void Xspllower(int s);
 *
 * Process pending interrupts.
 *
 * Important registers:
 *   ebx - cpl
 *   esi - address to resume loop at
 *   edi - scratch for Xsoftnet
 *
 * It is important that the bit scan instruction is bsr: it will get
 * the highest 2 bits (currently the IPI and clock handlers) first,
 * to avoid deadlocks where one CPU sends an IPI, another one is at
 * splhigh() and defers it, lands in here via splx(), and handles
 * a lower-prio one first, which needs to take the kernel lock -->
 * the sending CPU will never see that CPU accept the IPI
 * (see pmap_tlb_shootnow).
 */
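/*
 * The loop below is, in rough C (sketch only; the names are illustrative
 * and the 64-bit pending mask is shown as one value):
 *
 *	while ((pending = ci_ipending & ci_iunmask[cpl]) != 0) {
 *		slot = fls64(pending) - 1;		// highest bit first (bsr)
 *		atomic_clear_bit(&ci_ipending, slot);
 *		(*ci_isources[slot]->is_recurse)();	// re-enter handler
 *	}
 *	ci_ilevel = cpl;
 *
 * Each recurse entry eventually returns through *%esi, i.e. back to
 * .Lspllower_resume, so the scan restarts until nothing unmasked
 * remains pending.
 */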
	nop	/* Don't get confused with cx8_spllower_end */

IDTVEC(spllower)
	pushl	%ebp
	movl	%esp,%ebp
	MCOUNT_ASM
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	movl	8(%ebp),%ebx
	movl	$.Lspllower_resume,%esi		/* address to resume loop at */
1:
	/*
	 * Because of the way Xen interrupts work, *%esi will in fact be
	 * called from Xdoreti via iret.  So we always have to disable
	 * interrupts here for Xen.
	 */
#ifndef XENPV
	CLI(%eax)
#endif
.Lspllower_resume:
#ifdef XENPV
	CLI(%eax)
#endif
#if defined(DEBUG)
#ifndef XENPV
	pushf
	popl	%eax
	testl	$PSL_I,%eax
	jnz	.Lspllower_panic
#else
	movl	CPUVAR(VCPU),%eax
	movb	EVTCHN_UPCALL_MASK(%eax),%al
	andb	%al,%al
	jz	.Lspllower_panic
#endif /* XENPV */
#endif /* defined(DEBUG) */
	movl	%ebx,%eax			/* get cpl */
	movl	CPUVAR(IUNMASK)+4(,%eax,8),%eax
	andl	CPUVAR(IPENDING)+4,%eax		/* any non-masked bits left? */
	jz	10f
	bsrl	%eax,%eax
	btrl	%eax,CPUVAR(IPENDING)+4
	addl	$32,%eax
	movl	CPUVAR(ISOURCES)(,%eax,4),%eax
	jmp	*IS_RECURSE(%eax)
10:
	movl	%ebx,%eax			/* get cpl */
	movl	CPUVAR(IUNMASK)(,%eax,8),%eax
	andl	CPUVAR(IPENDING),%eax		/* any non-masked bits left? */
	jz	2f
	bsrl	%eax,%eax
	btrl	%eax,CPUVAR(IPENDING)
	movl	CPUVAR(ISOURCES)(,%eax,4),%eax
	jmp	*IS_RECURSE(%eax)
2:
	movb	%bl,CPUVAR(ILEVEL)
#ifdef XENPV
	STIC(%eax)
	jz 4f
	call	_C_LABEL(stipending)
	testl	%eax,%eax
	jnz	1b
4:
#else
	STI(%eax)
#endif
	popl	%edi
	popl	%esi
	popl	%ebx
	leave
	ret
#if defined(DEBUG)
.Lspllower_panic:
	pushl	$1f
	call	_C_LABEL(panic)
1:	.asciz	"SPLLOWER: INTERRUPT ENABLED"
#endif
IDTVEC_END(spllower)

/*
 * Xdoreti: Handle return from interrupt after device handler finishes.
 *
 * Important registers:
 *   ebx - cpl to restore
 *   esi - address to resume loop at
 *   edi - scratch for Xsoftnet
 *
 * Called with interrupts disabled.
 */
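/*
 * In outline (a sketch, not literal code): first drain any interrupts
 * that were deferred while the handler ran, then, if returning to user
 * mode, handle ASTs and a deferred pmap switch before the final iret:
 *
 *	while ((pending = ci_ipending & ci_iunmask[cpl]) != 0)
 *		(*ci_isources[fls64(pending) - 1]->is_resume)();
 *	ci_ilevel = cpl;
 *	if (returning_to_user_mode) {
 *		while (astpending)
 *			trap(T_ASTFLT);
 *		if (deferred_pmap_switch)
 *			pmap_load();
 *	}
 *	return_from_interrupt();
 */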
IDTVEC(doreti)
	IDEPTH_DECR
	popl	%ebx			/* get previous priority */
.Ldoreti_resume_stic:
	movl	$.Ldoreti_resume,%esi	/* address to resume loop at */
.Ldoreti_resume:

#if defined(DEBUG)
#ifndef XENPV
	pushf
	popl	%eax
	testl	$PSL_I,%eax
	jnz	.Ldoreti_panic
#else
	movl	CPUVAR(VCPU),%eax
	movb	EVTCHN_UPCALL_MASK(%eax),%al
	andb	%al,%al
	jz	.Ldoreti_panic
#endif /* XENPV */
#endif /* defined(DEBUG) */

	movl	%ebx,%eax
	movl	CPUVAR(IUNMASK)+4(,%eax,8),%eax
	andl	CPUVAR(IPENDING)+4,%eax
	jz	10f
	bsrl	%eax,%eax		/* slow, but not worth optimizing */
	btrl	%eax,CPUVAR(IPENDING)+4
	addl	$32,%eax
	movl	CPUVAR(ISOURCES)(,%eax, 4),%eax
	jmp	*IS_RESUME(%eax)
10:
	movl	%ebx,%eax
	movl	CPUVAR(IUNMASK)(,%eax,8),%eax
	andl	CPUVAR(IPENDING),%eax
	jz	2f
	bsrl	%eax,%eax		/* slow, but not worth optimizing */
	btrl	%eax,CPUVAR(IPENDING)
	movl	CPUVAR(ISOURCES)(,%eax, 4),%eax
	jmp	*IS_RESUME(%eax)
2:	/* Check for ASTs on exit to user mode. */
	movb	%bl,CPUVAR(ILEVEL)
5:
	testb	$CHK_UPL,TF_CS(%esp)
	jnz	doreti_checkast
	jmp	6f

	.type	_C_LABEL(doreti_checkast), @function
LABEL(doreti_checkast)
	CHECK_ASTPENDING(%eax)
	jz	3f
	CLEAR_ASTPENDING(%eax)
	STI(%eax)
	movl	$T_ASTFLT,TF_TRAPNO(%esp)	/* XXX undo later.. */
	/* Pushed T_ASTFLT into tf_trapno on entry. */
	pushl	%esp
	call	_C_LABEL(trap)
	addl	$4,%esp
	CLI(%eax)
	jmp	5b
END(doreti_checkast)

3:
	CHECK_DEFERRED_SWITCH
	jnz	9f
	HANDLE_DEFERRED_FPU
6:
#ifdef XENPV
	STIC(%eax)
	jz	4f
	call	_C_LABEL(stipending)
	testl	%eax,%eax
	jz	4f
	CLI(%eax)
	jmp	.Ldoreti_resume_stic
4:
#endif
	INTRFASTEXIT
9:
	STI(%eax)
	call	_C_LABEL(pmap_load)
	CLI(%eax)
	jmp	doreti_checkast	/* recheck ASTs */

#if defined(DEBUG)
.Ldoreti_panic:
	pushl	$1f
	call	_C_LABEL(panic)
1:	.asciz	"DORETI: INTERRUPT ENABLED"
#endif
IDTVEC_END(doreti)

/*
 * Xsoftintr()
 *
 * Switch to the LWP assigned to handle interrupts from the given
 * source.  We borrow the VM context from the interrupted LWP.
 *
 * On entry:
 *
 *	%eax		intrsource
 *	%esi		address to return to
 */
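/*
 * The sequence below mirrors a miniature cpu_switchto() (a sketch with
 * illustrative names): save the interrupted LWP's stack pointers, point
 * curlwp and %esp at the softint LWP, run softint_dispatch(), and then
 * switch back:
 *
 *	pinned = curlwp;
 *	curlwp = source->is_lwp;
 *	save %esp/%ebp in pinned->l_pcb;
 *	%esp = softint lwp's pcb_esp0;		// borrowed VM context
 *	softint_dispatch(pinned, source->is_maxlevel);
 *	%esp = pinned->l_pcb's saved pcb_esp;
 *	curlwp = pinned;			// via XCHG, see below
 */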
IDTVEC(softintr)
	pushl	$_C_LABEL(softintr_ret)	/* set up struct switchframe */
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	movb	$IPL_HIGH,CPUVAR(ILEVEL)
	STI(%esi)
	movl	CPUVAR(CURLWP),%esi
	movl	IS_LWP(%eax),%edi	/* switch to handler LWP */
	/*
	 * Simple MOV to set curlwp to softlwp.  See below on ordering
	 * required to restore softlwp like cpu_switchto.
	 *
	 * 1. Don't need store-before-store barrier because x86 is TSO.
	 *
	 * 2. Don't need store-before-load barrier because when we
	 *    enter a softint lwp, it can't be holding any mutexes, so
	 *    it can't release any until after it has acquired them, so
	 *    we need not participate in the protocol with
	 *    mutex_vector_enter barriers here.
	 *
	 * Hence no need for XCHG or barriers around MOV.
	 */
	movl	%edi,CPUVAR(CURLWP)
	movl	L_PCB(%edi),%edx
	movl	L_PCB(%esi),%ecx
	movl	%esp,PCB_ESP(%ecx)
	movl	%ebp,PCB_EBP(%ecx)
	movl	PCB_ESP0(%edx),%esp	/* onto new stack */
	pushl	IS_MAXLEVEL(%eax)	/* ipl to run at */
	pushl	%esi
	call	_C_LABEL(softint_dispatch)/* run handlers */
	addl	$8,%esp
	CLI(%ecx)
	movl	L_PCB(%esi),%ecx
	movl	PCB_ESP(%ecx),%esp

	/*
	 * Use XCHG, not MOV, to coordinate mutex_exit on this CPU with
	 * mutex_vector_enter on another CPU.
	 *
	 * 1. Any prior mutex_exit by the softint must be visible to
	 *    other CPUs before we restore curlwp on this one,
	 *    requiring store-before-store ordering.
	 *
	 *    (This is always guaranteed by the x86 memory model, TSO,
	 *    but other architectures require an explicit barrier before
	 *    the store to ci->ci_curlwp.)
	 *
	 * 2. Restoring curlwp must be visible on all other CPUs before
	 *    any subsequent mutex_exit on this one can even test
	 *    whether there might be waiters, requiring
	 *    store-before-load ordering.
	 *
	 *    (This is the only ordering x86 TSO ever requires any kind
	 *    of barrier for -- in this case, we take advantage of the
	 *    sequential consistency implied by XCHG to obviate the
	 *    need for MFENCE or something.)
	 *
	 * See kern_mutex.c for details -- this is necessary for
	 * adaptive mutexes to detect whether the lwp is on the CPU in
	 * order to safely block without requiring atomic r/m/w in
	 * mutex_exit.  See also cpu_switchto.
	 */
	xchgl	%esi,CPUVAR(CURLWP)	/* restore ci_curlwp */
	popl	%edi			/* unwind switchframe */
	popl	%esi
	addl	$8,%esp
	jmp	*%esi			/* back to splx/doreti */
IDTVEC_END(softintr)

/*
 * softintr_ret()
 *
 * Trampoline function that gets returned to by cpu_switchto() when
 * an interrupt handler blocks.  On entry:
 *
 *	%eax		prevlwp from cpu_switchto()
 */
ENTRY(softintr_ret)
	incl	CPUVAR(MTX_COUNT)	/* re-adjust after mi_switch */
	CLI(%eax)
	jmp	*%esi			/* back to splx/doreti */
END(softintr_ret)

/*
 * void softint_trigger(uintptr_t machdep);
 *
 * Trigger a software interrupt: mark it pending on the current CPU.
 */
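/*
 * Equivalent to (a sketch; ci_ipending is the illustrative name for the
 * per-CPU pending mask):
 *
 *	curcpu()->ci_ipending |= machdep;
 *
 * Because the read-modify-write is a single instruction, it cannot be
 * torn by an interrupt on this CPU, which is what the "atomic on local
 * cpu" note below refers to.
 */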
ENTRY(softint_trigger)
	movl	4(%esp),%eax
	orl	%eax,CPUVAR(IPENDING)	/* atomic on local cpu */
	ret
END(softint_trigger)

/*
 * Xrecurse_preempt()
 *
 * Handles preemption interrupts via Xspllower().
 */
IDTVEC(recurse_preempt)
	movb	$IPL_PREEMPT,CPUVAR(ILEVEL)
	STI(%eax)
	pushl	$0
	call	_C_LABEL(kpreempt)
	addl	$4,%esp
	CLI(%eax)
	jmp	*%esi
IDTVEC_END(recurse_preempt)

/*
 * Xresume_preempt()
 *
 * Handles preemption interrupts via Xdoreti().
 */
IDTVEC(resume_preempt)
	movb	$IPL_PREEMPT,CPUVAR(ILEVEL)
	STI(%eax)
	testb	$CHK_UPL,TF_CS(%esp)
	jnz	1f
	movl	TF_EIP(%esp),%eax
	pushl	%eax
	call	_C_LABEL(kpreempt)		/* from kernel */
	addl	$4,%esp
	CLI(%eax)
	jmp	*%esi
1:
	call	_C_LABEL(preempt)		/* from user */
	CLI(%eax)
	jmp	*%esi
IDTVEC_END(resume_preempt)