/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/asmacros.h>
#include <machine/specialreg.h>

#include "assym.inc"
#include "opt_sched.h"

/*****************************************************************************/
/* Scheduling                                                                */
/*****************************************************************************/

	.text

/*
 * cpu_throw()
 *
 * This is the second half of cpu_switch(). It is used when the current
 * thread is either a dummy or slated to die, and we no longer care
 * about its state.  This is only a slight optimization and is probably
 * not worth it anymore.  Note that we need to clear the pm_active bits so
 * we do need the old proc if it still exists.
 * %rdi = oldtd
 * %rsi = newtd
 */
ENTRY(cpu_throw)
	movq	%rsi,%r12
	movq	%rsi,%rdi
	call	pmap_activate_sw
	jmp	sw1
END(cpu_throw)
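
/*
 * A rough C-level view of cpu_throw() (a sketch only; the real entry
 * point is the assembly above, and oldtd is effectively unused here):
 *
 *	void cpu_throw(struct thread *oldtd, struct thread *newtd)
 *	{
 *		pmap_activate_sw(newtd);
 *		... continue at sw1, the second half of cpu_switch() ...
 *	}
 */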

/*
 * cpu_switch(old, new, mtx)
 *
 * Save the current thread state, then select the next thread to run
 * and load its state.
 * %rdi = oldtd
 * %rsi = newtd
 * %rdx = mtx
 */
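/*
 * In C terms the contract is roughly (a sketch; the declaration lives
 * in sys/proc.h):
 *
 *	void cpu_switch(struct thread *oldtd, struct thread *newtd,
 *	    struct mtx *mtx);
 *
 * where mtx is stored into oldtd->td_lock only after oldtd's state has
 * been saved, releasing the old thread to be picked up by another CPU.
 */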
ENTRY(cpu_switch)
	/* Switch to new thread.  First, save context. */
	leaq	TD_MD_PCB(%rdi),%r8

	movq	(%rsp),%rax			/* Hardware registers */
	movq	%r15,PCB_R15(%r8)
	movq	%r14,PCB_R14(%r8)
	movq	%r13,PCB_R13(%r8)
	movq	%r12,PCB_R12(%r8)
	movq	%rbp,PCB_RBP(%r8)
	movq	%rsp,PCB_RSP(%r8)
	movq	%rbx,PCB_RBX(%r8)
	movq	%rax,PCB_RIP(%r8)

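	/*
	 * Force a full iret return unless one is already pending: the
	 * user %fs/%gs bases must be reloaded on the way back to
	 * usermode.  When the CPU supports the FSGSBASE instructions,
	 * usermode can change its bases without entering the kernel, so
	 * snapshot them into the pcb here (skipped for kernel threads,
	 * which carry no user segment state).
	 */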
	testl	$PCB_FULL_IRET,PCB_FLAGS(%r8)
	jnz	2f
	orl	$PCB_FULL_IRET,PCB_FLAGS(%r8)
	testl	$TDP_KTHREAD,TD_PFLAGS(%rdi)
	jnz	2f
	testb	$CPUID_STDEXT_FSGSBASE,cpu_stdext_feature(%rip)
	jz	2f
	movl	%fs,%eax
	cmpl	$KUF32SEL,%eax
	jne	1f
	rdfsbase %rax				/* Read user fs base */
	movq	%rax,PCB_FSBASE(%r8)
1:	movl	%gs,%eax
	cmpl	$KUG32SEL,%eax
	jne	2f
	movq	%rdx,%r12
	movl	$MSR_KGSBASE,%ecx		/* Read user gs base */
	rdmsr
	shlq	$32,%rdx
	orq	%rdx,%rax
	movq	%rax,PCB_GSBASE(%r8)
	movq	%r12,%rdx

2:
	testl	$PCB_DBREGS,PCB_FLAGS(%r8)
	jnz	store_dr			/* static predict not taken */
done_store_dr:

	/* have we used fp, and need a save? */
	cmpq	%rdi,PCPU(FPCURTHREAD)
	jne	ctx_switch_fpusave_done
	movq	PCB_SAVEFPU(%r8),%r9
	clts
	cmpl	$0,use_xsave(%rip)
	jne	1f
	fxsave	(%r9)
	jmp	ctx_switch_fpusave_done
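	/*
	 * xsave takes its requested-feature bitmap in %edx:%eax, which
	 * clobbers the mtx argument still live in %rdx; preserve it in
	 * %rcx across the save.
	 */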
1:	movq	%rdx,%rcx
	movl	xsave_mask,%eax
	movl	xsave_mask+4,%edx
	testl	$PCB_32BIT,PCB_FLAGS(%r8)
	jne	ctx_switch_xsave32
	.globl	ctx_switch_xsave
ctx_switch_xsave:
	/* This is patched to xsaveopt if supported, see fpuinit_bsp1() */
	xsave64	(%r9)
ctx_switch_xsave_done:
	movq	%rcx,%rdx
ctx_switch_fpusave_done:
	/* Save is done.  Now fire up new thread. Leave old vmspace. */
	movq	%rsi,%r12
	movq	%rdi,%r13
	movq	%rdx,%r15
	movq	%rsi,%rdi
	callq	pmap_activate_sw
	movq	%r15,TD_LOCK(%r13)		/* Release the old thread */
sw1:
	leaq	TD_MD_PCB(%r12),%r8
#if defined(SCHED_ULE) && defined(SMP)
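	/*
	 * Under ULE the new thread's lock may still be blocked_lock if
	 * another CPU has not yet finished switching away from it; spin
	 * in sw1wait until that handoff completes.
	 */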
	movq	$blocked_lock, %rdx
	movq	TD_LOCK(%r12),%rcx
	cmpq	%rcx, %rdx
	je	sw1wait
sw1cont:
#endif
	/*
	 * At this point, we've switched address spaces and are ready
	 * to load up the rest of the next context.
	 */

	/* Skip loading LDT and user fsbase/gsbase for kthreads */
	testl	$TDP_KTHREAD,TD_PFLAGS(%r12)
	jnz	do_kthread

	/*
	 * Load ldt register
	 */
	movq	TD_PROC(%r12),%rcx
	cmpq	$0, P_MD+MD_LDT(%rcx)
	jne	do_ldt
	xorl	%eax,%eax
ld_ldt:	lldt	%ax

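	/*
	 * A GDT descriptor scatters its 32-bit base across bytes 2-3, 4
	 * and 7; the stores below rewrite just those fields of the
	 * per-CPU 32-bit %fs/%gs descriptors.
	 */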
	/* Restore fs base in GDT */
	movl	PCB_FSBASE(%r8),%eax
	movq	PCPU(FS32P),%rdx
	movw	%ax,2(%rdx)
	shrl	$16,%eax
	movb	%al,4(%rdx)
	shrl	$8,%eax
	movb	%al,7(%rdx)

	/* Restore gs base in GDT */
	movl	PCB_GSBASE(%r8),%eax
	movq	PCPU(GS32P),%rdx
	movw	%ax,2(%rdx)
	shrl	$16,%eax
	movb	%al,4(%rdx)
	shrl	$8,%eax
	movb	%al,7(%rdx)

do_kthread:
	/* Do we need to reload tss ? */
	movq	PCPU(TSSP),%rax
	movq	PCB_TSSP(%r8),%rdx
	movq	PCPU(PRVSPACE),%r13
	addq	$PC_COMMONTSS,%r13
	testq	%rdx,%rdx
	cmovzq	%r13,%rdx
	cmpq	%rax,%rdx
	jne	do_tss
done_tss:
	movq	TD_MD_STACK_BASE(%r12),%r9
	movq	%r9,PCPU(RSP0)
	movq	%r8,PCPU(CURPCB)
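	/*
	 * PCPU(UCR3) == ~0 (PMAP_NO_CR3) means PTI is not in use for the
	 * new thread, in which case the normal kernel stack base serves
	 * as the TSS rsp0 instead of the PTI trampoline stack.
	 */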
	movq	PCPU(PTI_RSP0),%rax
	cmpq	$~0,PCPU(UCR3)
	cmove	%r9,%rax
	movq	%rax,TSS_RSP0(%rdx)
	movq	%r12,PCPU(CURTHREAD)		/* into next thread */

	/* Test if debug registers should be restored. */
	testl	$PCB_DBREGS,PCB_FLAGS(%r8)
	jnz	load_dr				/* static predict not taken */
done_load_dr:

	/* Restore context. */
	movq	PCB_R15(%r8),%r15
	movq	PCB_R14(%r8),%r14
	movq	PCB_R13(%r8),%r13
	movq	PCB_R12(%r8),%r12
	movq	PCB_RBP(%r8),%rbp
	movq	PCB_RSP(%r8),%rsp
	movq	PCB_RBX(%r8),%rbx
	movq	PCB_RIP(%r8),%rax
	movq	%rax,(%rsp)
	movq	PCPU(CURTHREAD),%rdi
	call	fpu_activate_sw
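	/*
	 * Some CPUs want the return stack buffer flushed across a
	 * context switch as a speculation mitigation;
	 * cpu_flush_rsb_ctxsw is set during CPU identification when
	 * that applies.
	 */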
	cmpb	$0,cpu_flush_rsb_ctxsw(%rip)
	jne	rsb_flush
	ret

	/*
	 * The blocks below are ordered strangely for two reasons:
	 * 1: We would like to use static branch prediction hints.
	 * 2: Most Athlon64/Opteron CPUs don't have such hints; they
	 *    simply predict a forward branch as not taken, while Intel
	 *    cores use the 'rep' prefix to invert the static prediction.
	 * Placing the rarely-taken paths out of line works on both kinds
	 * of CPU.  We use jumps rather than calls in order to avoid
	 * touching the stack.
	 */

store_dr:
	movq	%dr7,%rax			/* yes, do the save */
	movq	%dr0,%r15
	movq	%dr1,%r14
	movq	%dr2,%r13
	movq	%dr3,%r12
	movq	%dr6,%r11
	movq	%r15,PCB_DR0(%r8)
	movq	%r14,PCB_DR1(%r8)
	movq	%r13,PCB_DR2(%r8)
	movq	%r12,PCB_DR3(%r8)
	movq	%r11,PCB_DR6(%r8)
	movq	%rax,PCB_DR7(%r8)
	andq	$0x0000fc00, %rax		/* disable all watchpoints */
	movq	%rax,%dr7
	jmp	done_store_dr

load_dr:
	movq	%dr7,%rax
	movq	PCB_DR0(%r8),%r15
	movq	PCB_DR1(%r8),%r14
	movq	PCB_DR2(%r8),%r13
	movq	PCB_DR3(%r8),%r12
	movq	PCB_DR6(%r8),%r11
	movq	PCB_DR7(%r8),%rcx
	movq	%r15,%dr0
	movq	%r14,%dr1
	/* Preserve reserved bits in %dr7 */
	andq	$0x0000fc00,%rax
	andq	$~0x0000fc00,%rcx
	movq	%r13,%dr2
	movq	%r12,%dr3
	orq	%rcx,%rax
	movq	%r11,%dr6
	movq	%rax,%dr7
	jmp	done_load_dr

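/*
 * Loading a new TSS means rewriting the descriptor's base (bytes 2-3, 4
 * and 7, plus the high dword at offset 8 in the 64-bit format) and
 * resetting the type field to "available" (0x89) before ltr, since ltr
 * faults on a busy TSS.
 */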
do_tss:	movq	%rdx,PCPU(TSSP)
	movq	%rdx,%rcx
	movq	PCPU(TSS),%rax
	movw	%cx,2(%rax)
	shrq	$16,%rcx
	movb	%cl,4(%rax)
	shrq	$8,%rcx
	movb	%cl,7(%rax)
	shrq	$8,%rcx
	movl	%ecx,8(%rax)
	movb	$0x89,5(%rax)	/* unset busy */
	movl	$TSSSEL,%eax
	ltr	%ax
	jmp	done_tss

do_ldt:	movq	PCPU(LDT),%rax
	movq	P_MD+MD_LDT_SD(%rcx),%rdx
	movq	%rdx,(%rax)
	movq	P_MD+MD_LDT_SD+8(%rcx),%rdx
	movq	%rdx,8(%rax)
	movl	$LDTSEL,%eax
	jmp	ld_ldt

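/*
 * 32-bit pcbs are saved with the legacy (non-REX) xsave form so the FPU
 * instruction and operand pointers are recorded in the 32-bit
 * selector:offset format.
 */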
	.globl	ctx_switch_xsave32
ctx_switch_xsave32:
	xsave	(%r9)
	jmp	ctx_switch_xsave_done
END(cpu_switch)

/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	/* Save caller's return address. */
	movq	(%rsp),%rax
	movq	%rax,PCB_RIP(%rdi)

	movq	%rbx,PCB_RBX(%rdi)
	movq	%rsp,PCB_RSP(%rdi)
	movq	%rbp,PCB_RBP(%rdi)
	movq	%r12,PCB_R12(%rdi)
	movq	%r13,PCB_R13(%rdi)
	movq	%r14,PCB_R14(%rdi)
	movq	%r15,PCB_R15(%rdi)

	movq	%cr0,%rax
	movq	%rax,PCB_CR0(%rdi)
	movq	%cr2,%rax
	movq	%rax,PCB_CR2(%rdi)
	movq	%cr3,%rax
	movq	%rax,PCB_CR3(%rdi)
	movq	%cr4,%rax
	movq	%rax,PCB_CR4(%rdi)

	movq	%dr0,%rax
	movq	%rax,PCB_DR0(%rdi)
	movq	%dr1,%rax
	movq	%rax,PCB_DR1(%rdi)
	movq	%dr2,%rax
	movq	%rax,PCB_DR2(%rdi)
	movq	%dr3,%rax
	movq	%rax,PCB_DR3(%rdi)
	movq	%dr6,%rax
	movq	%rax,PCB_DR6(%rdi)
	movq	%dr7,%rax
	movq	%rax,PCB_DR7(%rdi)

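	/*
	 * Save the MSRs that do not survive a suspend/resume cycle:
	 * the segment bases and the fast-syscall configuration.
	 */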
	movl	$MSR_FSBASE,%ecx
	rdmsr
	movl	%eax,PCB_FSBASE(%rdi)
	movl	%edx,PCB_FSBASE+4(%rdi)
	movl	$MSR_GSBASE,%ecx
	rdmsr
	movl	%eax,PCB_GSBASE(%rdi)
	movl	%edx,PCB_GSBASE+4(%rdi)
	movl	$MSR_KGSBASE,%ecx
	rdmsr
	movl	%eax,PCB_KGSBASE(%rdi)
	movl	%edx,PCB_KGSBASE+4(%rdi)
	movl	$MSR_EFER,%ecx
	rdmsr
	movl	%eax,PCB_EFER(%rdi)
	movl	%edx,PCB_EFER+4(%rdi)
	movl	$MSR_STAR,%ecx
	rdmsr
	movl	%eax,PCB_STAR(%rdi)
	movl	%edx,PCB_STAR+4(%rdi)
	movl	$MSR_LSTAR,%ecx
	rdmsr
	movl	%eax,PCB_LSTAR(%rdi)
	movl	%edx,PCB_LSTAR+4(%rdi)
	movl	$MSR_CSTAR,%ecx
	rdmsr
	movl	%eax,PCB_CSTAR(%rdi)
	movl	%edx,PCB_CSTAR+4(%rdi)
	movl	$MSR_SF_MASK,%ecx
	rdmsr
	movl	%eax,PCB_SFMASK(%rdi)
	movl	%edx,PCB_SFMASK+4(%rdi)

	sgdt	PCB_GDT(%rdi)
	sidt	PCB_IDT(%rdi)
	sldt	PCB_LDT(%rdi)
	str	PCB_TR(%rdi)

	movl	$1,%eax
	ret
END(savectx)
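
/*
 * savectx() returns 1 after saving; when resumectx() later restores
 * that pcb, control reappears at savectx()'s call site with a return
 * value of 0, in the style of setjmp/longjmp.  A suspend path can
 * therefore be written roughly as (a sketch, assuming a pcb pointer in
 * hand):
 *
 *	if (savectx(pcb)) {
 *		... state saved: enter the sleep state ...
 *	} else {
 *		... running again after resumectx() ...
 *	}
 */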

/*
 * resumectx(pcb)
 * Resume processor state from pcb.
 */
ENTRY(resumectx)
	/*
	 * Switch to the kernel page-table root: KPML4phys, or KPML5phys
	 * when 5-level paging (LA57) is enabled.
	 */
	movq	KPML4phys,%rax
	movq	KPML5phys,%rcx
	cmpl	$0, la57
	cmovne	%rcx, %rax
	movq	%rax,%cr3

	/* Force kernel segment registers. */
	movl	$KDSEL,%eax
	movw	%ax,%ds
	movw	%ax,%es
	movw	%ax,%ss
	movl	$KUF32SEL,%eax
	movw	%ax,%fs
	movl	$KUG32SEL,%eax
	movw	%ax,%gs

	movl	$MSR_FSBASE,%ecx
	movl	PCB_FSBASE(%rdi),%eax
	movl	4 + PCB_FSBASE(%rdi),%edx
	wrmsr
	movl	$MSR_GSBASE,%ecx
	movl	PCB_GSBASE(%rdi),%eax
	movl	4 + PCB_GSBASE(%rdi),%edx
	wrmsr
	movl	$MSR_KGSBASE,%ecx
	movl	PCB_KGSBASE(%rdi),%eax
	movl	4 + PCB_KGSBASE(%rdi),%edx
	wrmsr

	/* Restore EFER one more time. */
	movl	$MSR_EFER,%ecx
	movl	PCB_EFER(%rdi),%eax
	wrmsr

	/* Restore fast syscall stuff. */
	movl	$MSR_STAR,%ecx
	movl	PCB_STAR(%rdi),%eax
	movl	4 + PCB_STAR(%rdi),%edx
	wrmsr
	movl	$MSR_LSTAR,%ecx
	movl	PCB_LSTAR(%rdi),%eax
	movl	4 + PCB_LSTAR(%rdi),%edx
	wrmsr
	movl	$MSR_CSTAR,%ecx
	movl	PCB_CSTAR(%rdi),%eax
	movl	4 + PCB_CSTAR(%rdi),%edx
	wrmsr
	movl	$MSR_SF_MASK,%ecx
	movl	PCB_SFMASK(%rdi),%eax
	wrmsr

	/* Restore CR0, CR2, CR4 and CR3. */
	movq	PCB_CR0(%rdi),%rax
	movq	%rax,%cr0
	movq	PCB_CR2(%rdi),%rax
	movq	%rax,%cr2
	movq	PCB_CR4(%rdi),%rax
	movq	%rax,%cr4
	movq	PCB_CR3(%rdi),%rax
	movq	%rax,%cr3

	/* Restore descriptor tables. */
	lidt	PCB_IDT(%rdi)
	lldt	PCB_LDT(%rdi)

#define	SDT_SYSTSS	9
#define	SDT_SYSBSY	11

	/* Clear "task busy" bit and reload TR. */
	movq	PCPU(TSS),%rax
	andb	$(~SDT_SYSBSY | SDT_SYSTSS),5(%rax)
	movw	PCB_TR(%rdi),%ax
	ltr	%ax

#undef	SDT_SYSTSS
#undef	SDT_SYSBSY

	/* Restore debug registers. */
	movq	PCB_DR0(%rdi),%rax
	movq	%rax,%dr0
	movq	PCB_DR1(%rdi),%rax
	movq	%rax,%dr1
	movq	PCB_DR2(%rdi),%rax
	movq	%rax,%dr2
	movq	PCB_DR3(%rdi),%rax
	movq	%rax,%dr3
	movq	PCB_DR6(%rdi),%rax
	movq	%rax,%dr6
	movq	PCB_DR7(%rdi),%rax
	movq	%rax,%dr7

	/* Restore other callee saved registers. */
	movq	PCB_R15(%rdi),%r15
	movq	PCB_R14(%rdi),%r14
	movq	PCB_R13(%rdi),%r13
	movq	PCB_R12(%rdi),%r12
	movq	PCB_RBP(%rdi),%rbp
	movq	PCB_RSP(%rdi),%rsp
	movq	PCB_RBX(%rdi),%rbx

	/* Restore return address. */
	movq	PCB_RIP(%rdi),%rax
	movq	%rax,(%rsp)

	xorl	%eax,%eax
	ret
END(resumectx)

/* Wait for the new thread to become unblocked */
#if defined(SCHED_ULE) && defined(SMP)
sw1wait:
1:
	pause
	movq	TD_LOCK(%r12),%rcx
	cmpq	%rcx, %rdx
	je	1b
	jmp	sw1cont
#endif