/* xref: /freebsd/sys/i386/i386/swtch.S (revision 9768746b) */
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_sched.h"

#include <machine/asmacros.h>

#include "assym.inc"

41#if defined(SMP) && defined(SCHED_ULE)
42#define	SETOP		xchgl
43#define	BLOCK_SPIN(reg)							\
44		movl		$blocked_lock,%eax ;			\
45	100: ;								\
46		lock ;							\
47		cmpxchgl	%eax,TD_LOCK(reg) ;			\
48		jne		101f ;					\
49		pause ;							\
50		jmp		100b ;					\
51	101:
52#else
53#define	SETOP		movl
54#define	BLOCK_SPIN(reg)
55#endif
56
/*****************************************************************************/
/* Scheduling                                                                */
/*****************************************************************************/

	.text

63/*
64 * cpu_throw()
65 *
66 * This is the second half of cpu_switch(). It is used when the current
67 * thread is either a dummy or slated to die, and we no longer care
68 * about its state.  This is only a slight optimization and is probably
69 * not worth it anymore.  Note that we need to clear the pm_active bits so
70 * we do need the old proc if it still exists.
71 * 0(%esp) = ret
72 * 4(%esp) = oldtd
73 * 8(%esp) = newtd
74 */
75ENTRY(cpu_throw)
76	movl	PCPU(CPUID), %esi
77	/* release bit from old pm_active */
78	movl	PCPU(CURPMAP), %ebx
79#ifdef SMP
80	lock
81#endif
82	btrl	%esi, PM_ACTIVE(%ebx)		/* clear old */
83	movl	8(%esp),%ecx			/* New thread */
84	movl	TD_PCB(%ecx),%edx
85	/* set bit in new pm_active */
86	movl	TD_PROC(%ecx),%eax
87	movl	P_VMSPACE(%eax), %ebx
88	addl	$VM_PMAP, %ebx
89	movl	%ebx, PCPU(CURPMAP)
90#ifdef SMP
91	lock
92#endif
93	btsl	%esi, PM_ACTIVE(%ebx)		/* set new */
94	jmp	sw1
95END(cpu_throw)
96
97/*
98 * cpu_switch(old, new)
99 *
100 * Save the current thread state, then select the next thread to run
101 * and load its state.
102 * 0(%esp) = ret
103 * 4(%esp) = oldtd
104 * 8(%esp) = newtd
105 * 12(%esp) = newlock
106 */
107ENTRY(cpu_switch)
108
109	/* Switch to new thread.  First, save context. */
110	movl	4(%esp),%ecx
111
112#ifdef INVARIANTS
113	testl	%ecx,%ecx			/* no thread? */
114	jz	badsw2				/* no, panic */
115#endif
116
117	movl	TD_PCB(%ecx),%edx
118
119	movl	(%esp),%eax			/* Hardware registers */
120	movl	%eax,PCB_EIP(%edx)
121	movl	%ebx,PCB_EBX(%edx)
122	movl	%esp,PCB_ESP(%edx)
123	movl	%ebp,PCB_EBP(%edx)
124	movl	%esi,PCB_ESI(%edx)
125	movl	%edi,PCB_EDI(%edx)
126	mov	%gs,PCB_GS(%edx)
127	/* Test if debug registers should be saved. */
128	testl	$PCB_DBREGS,PCB_FLAGS(%edx)
129	jz      1f                              /* no, skip over */
130	movl    %dr7,%eax                       /* yes, do the save */
131	movl    %eax,PCB_DR7(%edx)
132	andl    $0x0000fc00, %eax               /* disable all watchpoints */
133	movl    %eax,%dr7
134	movl    %dr6,%eax
135	movl    %eax,PCB_DR6(%edx)
136	movl    %dr3,%eax
137	movl    %eax,PCB_DR3(%edx)
138	movl    %dr2,%eax
139	movl    %eax,PCB_DR2(%edx)
140	movl    %dr1,%eax
141	movl    %eax,PCB_DR1(%edx)
142	movl    %dr0,%eax
143	movl    %eax,PCB_DR0(%edx)
1441:
145
146	/* have we used fp, and need a save? */
147	cmpl	%ecx,PCPU(FPCURTHREAD)
148	jne	1f
149	pushl	PCB_SAVEFPU(%edx)		/* h/w bugs make saving complicated */
150	call	npxsave				/* do it in a big C function */
151	popl	%eax
1521:
153
154	/* Save is done.  Now fire up new thread. */
155	movl	4(%esp),%edi
156	movl	8(%esp),%ecx			/* New thread */
157	movl	12(%esp),%esi			/* New lock */
158#ifdef INVARIANTS
159	testl	%ecx,%ecx			/* no thread? */
160	jz	badsw3				/* no, panic */
161#endif
162	movl	TD_PCB(%ecx),%edx
163
164	/* Switchout td_lock */
165	movl	%esi,%eax
166	movl	PCPU(CPUID),%esi
167	SETOP	%eax,TD_LOCK(%edi)
168
169	/* Release bit from old pmap->pm_active */
170	movl	PCPU(CURPMAP), %ebx
171#ifdef SMP
172	lock
173#endif
174	btrl	%esi, PM_ACTIVE(%ebx)		/* clear old */
175
176	/* Set bit in new pmap->pm_active */
177	movl	TD_PROC(%ecx),%eax		/* newproc */
178	movl	P_VMSPACE(%eax), %ebx
179	addl	$VM_PMAP, %ebx
180	movl	%ebx, PCPU(CURPMAP)
181#ifdef SMP
182	lock
183#endif
184	btsl	%esi, PM_ACTIVE(%ebx)		/* set new */
185sw1:
186	BLOCK_SPIN(%ecx)
187	/*
188	 * At this point, we have managed thread locks and are ready
189	 * to load up the rest of the next context.
190	 */
191
192	/* Load a pointer to the thread kernel stack into PCPU. */
193	leal	-VM86_STACK_SPACE(%edx), %eax	/* leave space for vm86 */
194	movl	%eax, PCPU(KESP0)
195
196	cmpl	$0, PCB_EXT(%edx)		/* has pcb extension? */
197	je	1f				/* If not, use the default */
198	movl	$1, PCPU(PRIVATE_TSS) 		/* mark use of private tss */
199	movl	PCB_EXT(%edx), %edi		/* new tss descriptor */
200	movl	PCPU(TRAMPSTK), %ebx
201	movl	%ebx, PCB_EXT_TSS+TSS_ESP0(%edi)
202	jmp	2f				/* Load it up */
203
2041:	/*
205	 * Use the common default TSS instead of our own.
206	 * Stack pointer in the common TSS points to the trampoline stack
207	 * already and should be not changed.
208	 *
209	 * Test this CPU's flag to see if this CPU was using a private TSS.
210	 */
211	cmpl	$0, PCPU(PRIVATE_TSS)		/* Already using the common? */
212	je	3f				/* if so, skip reloading */
213	movl	$0, PCPU(PRIVATE_TSS)
214	PCPU_ADDR(COMMON_TSSD, %edi)
2152:
216	/* Move correct tss descriptor into GDT slot, then reload tr. */
217	movl	PCPU(TSS_GDT), %ebx		/* entry in GDT */
218	movl	0(%edi), %eax
219	movl	4(%edi), %esi
220	movl	%eax, 0(%ebx)
221	movl	%esi, 4(%ebx)
222	movl	$GPROC0_SEL*8, %esi		/* GSEL(GPROC0_SEL, SEL_KPL) */
223	ltr	%si
2243:
225
226	/* Copy the %fs and %gs selectors into this pcpu gdt */
227	leal	PCB_FSD(%edx), %esi
228	movl	PCPU(FSGS_GDT), %edi
229	movl	0(%esi), %eax		/* %fs selector */
230	movl	4(%esi), %ebx
231	movl	%eax, 0(%edi)
232	movl	%ebx, 4(%edi)
233	movl	8(%esi), %eax		/* %gs selector, comes straight after */
234	movl	12(%esi), %ebx
235	movl	%eax, 8(%edi)
236	movl	%ebx, 12(%edi)
237
238	/* Restore context. */
239	movl	PCB_EBX(%edx),%ebx
240	movl	PCB_ESP(%edx),%esp
241	movl	PCB_EBP(%edx),%ebp
242	movl	PCB_ESI(%edx),%esi
243	movl	PCB_EDI(%edx),%edi
244	movl	PCB_EIP(%edx),%eax
245	movl	%eax,(%esp)
246
247	movl	%edx, PCPU(CURPCB)
248	movl	%ecx, PCPU(CURTHREAD)		/* into next thread */
249
250	/*
251	 * Determine the LDT to use and load it if is the default one and
252	 * that is not the current one.
253	 */
254	movl	TD_PROC(%ecx),%eax
255	cmpl    $0,P_MD+MD_LDT(%eax)
256	jnz	1f
257	movl	_default_ldt,%eax
258	cmpl	PCPU(CURRENTLDT),%eax
259	je	2f
260	lldt	_default_ldt
261	movl	%eax,PCPU(CURRENTLDT)
262	jmp	2f
2631:
264	/* Load the LDT when it is not the default one. */
265	pushl	%edx				/* Preserve pointer to pcb. */
266	addl	$P_MD,%eax			/* Pointer to mdproc is arg. */
267	pushl	%eax
268	/*
269	 * Holding dt_lock prevents context switches, so dt_lock cannot
270	 * be held now and set_user_ldt() will not deadlock acquiring it.
271	 */
272	call	set_user_ldt
273	addl	$4,%esp
274	popl	%edx
2752:
276
277	/* This must be done after loading the user LDT. */
278	.globl	cpu_switch_load_gs
279cpu_switch_load_gs:
280	mov	PCB_GS(%edx),%gs
281
282	pushl	%edx
283	pushl	PCPU(CURTHREAD)
284	call	npxswitch
285	popl	%edx
286	popl	%edx
287
288	/* Test if debug registers should be restored. */
289	testl	$PCB_DBREGS,PCB_FLAGS(%edx)
290	jz      1f
291
292	/*
293	 * Restore debug registers.  The special code for dr7 is to
294	 * preserve the current values of its reserved bits.
295	 */
296	movl    PCB_DR6(%edx),%eax
297	movl    %eax,%dr6
298	movl    PCB_DR3(%edx),%eax
299	movl    %eax,%dr3
300	movl    PCB_DR2(%edx),%eax
301	movl    %eax,%dr2
302	movl    PCB_DR1(%edx),%eax
303	movl    %eax,%dr1
304	movl    PCB_DR0(%edx),%eax
305	movl    %eax,%dr0
306	movl	%dr7,%eax
307	andl    $0x0000fc00,%eax
308	movl    PCB_DR7(%edx),%ecx
309	andl	$~0x0000fc00,%ecx
310	orl     %ecx,%eax
311	movl    %eax,%dr7
3121:
313	ret
314
315#ifdef INVARIANTS
316badsw1:
317	pushal
318	pushl	$sw0_1
319	call	panic
320sw0_1:	.asciz	"cpu_throw: no newthread supplied"
321
322badsw2:
323	pushal
324	pushl	$sw0_2
325	call	panic
326sw0_2:	.asciz	"cpu_switch: no curthread supplied"
327
328badsw3:
329	pushal
330	pushl	$sw0_3
331	call	panic
332sw0_3:	.asciz	"cpu_switch: no newthread supplied"
333#endif
334END(cpu_switch)
335
336/*
337 * savectx(pcb)
338 * Update pcb, saving current processor state.
339 */
340ENTRY(savectx)
341	/* Fetch PCB. */
342	movl	4(%esp),%ecx
343
344	/* Save caller's return address.  Child won't execute this routine. */
345	movl	(%esp),%eax
346	movl	%eax,PCB_EIP(%ecx)
347
348	movl	%cr3,%eax
349	movl	%eax,PCB_CR3(%ecx)
350
351	movl	%ebx,PCB_EBX(%ecx)
352	movl	%esp,PCB_ESP(%ecx)
353	movl	%ebp,PCB_EBP(%ecx)
354	movl	%esi,PCB_ESI(%ecx)
355	movl	%edi,PCB_EDI(%ecx)
356	mov	%gs,PCB_GS(%ecx)
357
358	movl	%cr0,%eax
359	movl	%eax,PCB_CR0(%ecx)
360	movl	%cr2,%eax
361	movl	%eax,PCB_CR2(%ecx)
362	movl	%cr4,%eax
363	movl	%eax,PCB_CR4(%ecx)
364
365	movl	%dr0,%eax
366	movl	%eax,PCB_DR0(%ecx)
367	movl	%dr1,%eax
368	movl	%eax,PCB_DR1(%ecx)
369	movl	%dr2,%eax
370	movl	%eax,PCB_DR2(%ecx)
371	movl	%dr3,%eax
372	movl	%eax,PCB_DR3(%ecx)
373	movl	%dr6,%eax
374	movl	%eax,PCB_DR6(%ecx)
375	movl	%dr7,%eax
376	movl	%eax,PCB_DR7(%ecx)
377
378	mov	%ds,PCB_DS(%ecx)
379	mov	%es,PCB_ES(%ecx)
380	mov	%fs,PCB_FS(%ecx)
381	mov	%ss,PCB_SS(%ecx)
382
383	sgdt	PCB_GDT(%ecx)
384	sidt	PCB_IDT(%ecx)
385	sldt	PCB_LDT(%ecx)
386	str	PCB_TR(%ecx)
387
388	movl	$1,%eax
389	ret
390END(savectx)
391
392/*
393 * resumectx(pcb) __fastcall
394 * Resuming processor state from pcb.
395 */
396ENTRY(resumectx)
397	/* Restore GDT. */
398	lgdt	PCB_GDT(%ecx)
399
400	/* Restore segment registers */
401	movzwl	PCB_DS(%ecx),%eax
402	mov	%ax,%ds
403	movzwl	PCB_ES(%ecx),%eax
404	mov	%ax,%es
405	movzwl	PCB_FS(%ecx),%eax
406	mov	%ax,%fs
407	movzwl	PCB_GS(%ecx),%eax
408	movw	%ax,%gs
409	movzwl	PCB_SS(%ecx),%eax
410	mov	%ax,%ss
411
412	/* Restore CR2, CR4, CR3 and CR0 */
413	movl	PCB_CR2(%ecx),%eax
414	movl	%eax,%cr2
415	movl	PCB_CR4(%ecx),%eax
416	movl	%eax,%cr4
417	movl	PCB_CR3(%ecx),%eax
418	movl	%eax,%cr3
419	movl	PCB_CR0(%ecx),%eax
420	movl	%eax,%cr0
421	jmp	1f
4221:
423
424	/* Restore descriptor tables */
425	lidt	PCB_IDT(%ecx)
426	lldt	PCB_LDT(%ecx)
427
428#define SDT_SYS386TSS	9
429#define SDT_SYS386BSY	11
430	/* Clear "task busy" bit and reload TR */
431	movl	PCPU(TSS_GDT),%eax
432	andb	$(~SDT_SYS386BSY | SDT_SYS386TSS),5(%eax)
433	movzwl	PCB_TR(%ecx),%eax
434	ltr	%ax
435#undef SDT_SYS386TSS
436#undef SDT_SYS386BSY
437
438	/* Restore debug registers */
439	movl	PCB_DR0(%ecx),%eax
440	movl	%eax,%dr0
441	movl	PCB_DR1(%ecx),%eax
442	movl	%eax,%dr1
443	movl	PCB_DR2(%ecx),%eax
444	movl	%eax,%dr2
445	movl	PCB_DR3(%ecx),%eax
446	movl	%eax,%dr3
447	movl	PCB_DR6(%ecx),%eax
448	movl	%eax,%dr6
449	movl	PCB_DR7(%ecx),%eax
450	movl	%eax,%dr7
451
452	/* Restore other registers */
453	movl	PCB_EDI(%ecx),%edi
454	movl	PCB_ESI(%ecx),%esi
455	movl	PCB_EBP(%ecx),%ebp
456	movl	PCB_ESP(%ecx),%esp
457	movl	PCB_EBX(%ecx),%ebx
458
459	/* reload code selector by turning return into intersegmental return */
460	pushl	PCB_EIP(%ecx)
461	movl	$KCSEL,4(%esp)
462	xorl	%eax,%eax
463	lret
464END(resumectx)
465