/*
 * Copyright (c) 2003,2004,2008 The DragonFly Project.  All rights reserved.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 */

//#include "use_npx.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/segments.h>

#include <machine/pmap.h>
#if 0 /* JG */
#include <machine_base/apic/apicreg.h>
#endif
#include <machine/lock.h>

#include "assym.s"

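/*
 * MPLOCKED expands to the x86 'lock' prefix; it makes the read-modify-write
 * instructions it prefixes (the andq/orq updates of pm_active below) atomic
 * with respect to other cpus.
 */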
#define	MPLOCKED	lock ;

/*
 * PREEMPT_OPTIMIZE
 *
 * This feature allows the preempting (interrupt) kernel thread to borrow
 * %cr3 from the user process it interrupts, allowing us to do away with
 * two %cr3 stores, two atomic ops (pm_active is not modified), and pmap
 * lock tests (not needed since pm_active is not modified).
 *
 * Unfortunately, I couldn't really measure any improvement so for now the
 * optimization is disabled.
 */
#undef PREEMPT_OPTIMIZE

/*
 * LWP_SWITCH_OPTIMIZE
 *
 * This optimization attempted to avoid a %cr3 store and atomic op, and
 * it might have been useful on older cpus but newer cpus (and more
 * importantly multi-core cpus) generally do not switch between LWPs on
 * the same cpu.  Multiple user threads are more likely to be distributed
 * across multiple cpus.  In cpu-bound situations the scheduler will already
 * be in batch-mode (meaning relatively few context-switches/sec), and
 * otherwise the lwp(s) are likely to be blocked waiting for events.
 *
 * On the flip side, the conditionals this option uses measurably reduce
 * performance (just slightly, honestly).  So this option is disabled.
 */
#undef LWP_SWITCH_OPTIMIZE

	/*
	 * Global Declarations
	 */
	.data

	.globl	panic
	.globl	lwkt_switch_return

#if defined(SWTCH_OPTIM_STATS)
	.globl	swtch_optim_stats, tlb_flush_count
swtch_optim_stats:	.long	0		/* number of _swtch_optims */
tlb_flush_count:	.long	0
#endif

	/*
	 * Code
	 */
	.text

/*
 * cpu_heavy_switch(struct thread *next_thread)
 *
 *	Switch from the current thread to a new thread.  This entry
 *	is normally called via the thread->td_switch function, and will
 *	only be called when the current thread is a heavy weight process.
 *
 *	Some instructions have been reordered to reduce pipeline stalls.
 *
 *	YYY disable interrupts once giant is removed.
 */
ENTRY(cpu_heavy_switch)
	/*
	 * Save RIP, RSP and callee-saved registers (RBX, RBP, R12-R15).
	 */
	movq	PCPU(curthread),%rcx
	/* On top of the stack is the return address. */
	movq	(%rsp),%rax			/* (reorder optimization) */
	movq	TD_PCB(%rcx),%rdx		/* RDX = PCB */
	movq	%rax,PCB_RIP(%rdx)		/* return PC may be modified */
	movq	%rbx,PCB_RBX(%rdx)
	movq	%rsp,PCB_RSP(%rdx)
	movq	%rbp,PCB_RBP(%rdx)
	movq	%r12,PCB_R12(%rdx)
	movq	%r13,PCB_R13(%rdx)
	movq	%r14,PCB_R14(%rdx)
	movq	%r15,PCB_R15(%rdx)

	/*
	 * Clear the cpu bit in the pmap active mask.  The restore
	 * function will set the bit in the pmap active mask.
	 *
	 * If we are switching away due to a preempt, TD_PREEMPTED(%rdi)
	 * will be non-NULL.  In this situation we do want to avoid extra
	 * atomic ops and %cr3 reloads (see top of file for reasoning).
	 *
	 * NOTE: Do not try to optimize avoiding the %cr3 reload or pm_active
	 *	 adjustment.  This mattered on uni-processor systems but in
	 *	 multi-core systems we are highly unlikely to be switching
	 *	 to another thread belonging to the same process on this cpu.
	 *
	 *	 (more likely the target thread is still sleeping, or if cpu-
	 *	 bound the scheduler is in batch mode and the switch rate is
	 *	 already low).
	 */
	movq	%rcx,%rbx			/* RBX = oldthread */
#ifdef PREEMPT_OPTIMIZE
	/*
	 * If we are being preempted the target thread borrows our %cr3
	 * and we leave our pmap bits intact for the duration.
	 */
	movq	TD_PREEMPTED(%rdi),%r13
	testq	%r13,%r13
	jne	2f
#endif

	movq	TD_LWP(%rcx),%rcx		/* RCX = oldlwp	*/
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = oldvmspace */
#ifdef LWP_SWITCH_OPTIMIZE
	movq	TD_LWP(%rdi),%r13		/* R13 = newlwp */
	testq	%r13,%r13			/* might not be a heavy */
	jz	1f
	cmpq	LWP_VMSPACE(%r13),%rcx		/* same vmspace? */
	je	2f
1:
#endif
	movq	PCPU(cpumask_simple),%rsi
	movq	PCPU(cpumask_offset),%r12
	xorq	$-1,%rsi
	MPLOCKED andq %rsi, VM_PMAP+PM_ACTIVE(%rcx, %r12, 1)
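	/*
	 * The xorq complements this cpu's mask (cpumask_simple, at word
	 * offset cpumask_offset), so the locked andq atomically clears
	 * our cpu's bit in the old vmspace's pm_active field.
	 */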
2:

	/*
	 * Push the LWKT switch restore function, which resumes a heavy
	 * weight process.  Note that the LWKT switcher is based on
	 * TD_SP, while the heavy weight process switcher is based on
	 * PCB_RSP.  TD_SP is usually two ints pushed relative to
	 * PCB_RSP.  We push the flags for later restore by cpu_heavy_restore.
	 */
	pushfq
	cli
	movq	$cpu_heavy_restore, %rax
	pushq	%rax
	movq	%rsp,TD_SP(%rbx)

	/*
	 * Save debug regs if necessary
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	%dr7,%rax			/* yes, do the save */
	movq	%rax,PCB_DR7(%rdx)
	/* JG correct value? */
	andq	$0x0000fc00, %rax		/* disable all watchpoints */
	movq	%rax,%dr7
	movq	%dr6,%rax
	movq	%rax,PCB_DR6(%rdx)
	movq	%dr3,%rax
	movq	%rax,PCB_DR3(%rdx)
	movq	%dr2,%rax
	movq	%rax,PCB_DR2(%rdx)
	movq	%dr1,%rax
	movq	%rax,PCB_DR1(%rdx)
	movq	%dr0,%rax
	movq	%rax,PCB_DR0(%rdx)
1:

	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 */
	cmpq	%rbx,PCPU(npxthread)
	jne	1f
	movq	%rdi,%r12		/* save %rdi. %r12 is callee-saved */
	movq	TD_SAVEFPU(%rbx),%rdi
	call	npxsave			/* do it in a big C function */
	movq	%r12,%rdi		/* restore %rdi */
1:

	/*
	 * Switch to the next thread, which was passed as an argument
	 * to cpu_heavy_switch().  The argument is in %rdi.
	 * Set the current thread, load the stack pointer,
	 * and 'ret' into the switch-restore function.
	 *
	 * The switch restore function expects the new thread to be in %rax
	 * and the old one to be in %rbx.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack; we are
	 * protected by a critical section so it is ok.
	 */
	movq	%rdi,%rax		/* RAX = newtd, RBX = oldtd */
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret
END(cpu_heavy_switch)

/*
 *  cpu_exit_switch(struct thread *next)
 *
 *	The switch function is changed to this when a thread is going away
 *	for good.  We have to ensure that the MMU state is not cached, and
 *	we don't bother saving the existing thread state before switching.
 *
 *	At this point we are in a critical section and this cpu owns the
 *	thread's token, which serves as an interlock until the switchout is
 *	complete.
 */
ENTRY(cpu_exit_switch)

#ifdef PREEMPT_OPTIMIZE
	/*
	 * If we were preempting we are switching back to the original thread.
	 * In this situation we already have the original thread's %cr3 and
	 * should not replace it!
	 */
	testl	$TDF_PREEMPT_DONE, TD_FLAGS(%rdi)
	jne	1f
#endif

	/*
	 * Get us out of the vmspace
	 */
	movq	KPML4phys,%rcx
	movq	%cr3,%rax
	cmpq	%rcx,%rax
	je	1f

	movq	%rcx,%cr3
1:
	movq	PCPU(curthread),%rbx

	/*
	 * If this is a process/lwp, deactivate the pmap after we've
	 * switched it out.
	 */
	movq	TD_LWP(%rbx),%rcx
	testq	%rcx,%rcx
	jz	2f
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = vmspace */

	movq	PCPU(cpumask_simple),%rax
	movq	PCPU(cpumask_offset),%r12
	xorq	$-1,%rax
	MPLOCKED andq %rax, VM_PMAP+PM_ACTIVE(%rcx, %r12, 1)
2:
	/*
	 * Switch to the next thread.  RET into the restore function, which
	 * expects the new thread in RAX and the old in RBX.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack; we are
	 * protected by a critical section so it is ok.
	 */
	cli
	movq	%rdi,%rax
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret
END(cpu_exit_switch)

/*
 * cpu_heavy_restore()	(current thread in %rax on entry, old thread in %rbx)
 *
 *	We immediately move %rax to %r12.  %rbx is retained throughout, and
 *	we nominally use %r14 for TD_PCB(%r12) until near the end where we
 *	switch to %rdx for that.
 *
 *	Restore the thread after an LWKT switch.  This entry is normally
 *	called via the LWKT switch restore function, which was pulled
 *	off the thread stack and jumped to.
 *
 *	This entry is only called if the thread was previously saved
 *	using cpu_heavy_switch() (the heavy weight process thread switcher),
 *	or when a new process is initially scheduled.
 *
 *	NOTE: The lwp may be in any state, not necessarily LSRUN, because
 *	a preemption switch may interrupt the process and then return via
 *	cpu_heavy_restore.
 *
 *	YYY theoretically we do not have to restore everything here; a lot
 *	of this junk can wait until we return to usermode.  But for now
 *	we restore everything.
 *
 *	YYY the PCB crap is really crap, it makes startup a bitch because
 *	we can't switch away.
 *
 *	YYY note: spl check is done in mi_switch when it splx()'s.
 */

ENTRY(cpu_heavy_restore)
	movq	%rax,%r12			/* R12 = newtd */
	movq	TD_PCB(%rax),%r14		/* R14 = PCB */
	movq	%r14, PCPU(trampoline)+TR_PCB_RSP
	movq	PCB_FLAGS(%r14), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_FLAGS
	movq	PCB_CR3_ISO(%r14), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_CR3_ISO
	movq	PCB_CR3(%r14), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_CR3
	popfq
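	/*
	 * The popfq consumes the flags word that cpu_heavy_switch()
	 * pushed (pushfq) just before saving TD_SP, restoring the
	 * interrupt enable state the thread had when it switched out.
	 */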

#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif
#ifdef PREEMPT_OPTIMIZE
	/*
	 * If restoring our thread after a preemption has returned to
	 * us, our %cr3 and pmap were borrowed and are being returned to
	 * us and no further action on those items need be taken.
	 */
	testl	$TDF_PREEMPT_DONE, TD_FLAGS(%r12)
	jne	4f
#endif

	/*
	 * Tell the pmap that our cpu is using the VMSPACE now.  We cannot
	 * safely test/reload %cr3 until after we have set the bit in the
	 * pmap.
	 *
	 * We must do an interlocked test of the CPULOCK_EXCL at the same
	 * time.  If found to be set we will have to wait for it to clear
	 * and then do a forced reload of %cr3 (even if the value matches).
	 *
	 * XXX When switching between two LWPs sharing the same vmspace
	 *     the cpu_heavy_switch() code currently avoids clearing the
	 *     cpu bit in PM_ACTIVE.  So if the bit is already set we can
	 *     avoid checking for the interlock via CPULOCK_EXCL.  We currently
	 *     do not perform this optimization.
	 */
	movq	TD_LWP(%r12),%rcx
	movq	LWP_VMSPACE(%rcx),%rcx		/* RCX = vmspace */

	movq	PCPU(cpumask_simple),%rsi
	movq	PCPU(cpumask_offset),%r13
	MPLOCKED orq %rsi, VM_PMAP+PM_ACTIVE(%rcx, %r13, 1)
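	/*
	 * The locked orq is a full memory barrier on x86, so the load of
	 * PM_ACTIVE_LOCK below cannot be performed ahead of our pm_active
	 * bit becoming globally visible.
	 */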

	movl	VM_PMAP+PM_ACTIVE_LOCK(%rcx),%esi
	testl	$CPULOCK_EXCL,%esi
	jz	1f

	movq	%rcx,%rdi		/* (found to be set) */
	call	pmap_interlock_wait	/* pmap_interlock_wait(%rdi:vm) */

	/*
	 * Need unconditional load cr3
	 */
	movq	PCB_CR3(%r14),%rcx	/* RCX = desired CR3 */
	jmp	2f			/* unconditional reload */
1:
	/*
	 * Restore the MMU address space.  If it is the same as the last
	 * thread we don't have to invalidate the tlb (i.e. reload cr3).
	 *
	 * XXX Temporary kludge, do NOT do this optimization!  The problem
	 *     is that the pm_active bit for the cpu was dropped for a small
	 *     period of time, just a few cycles, but even one cycle is long
	 *     enough for some other cpu doing a pmap invalidation to not see
	 *     our cpu.
	 *
	 *     When that happens, and we don't invltlb (by loading %cr3), we
	 *     wind up with a stale TLB.
	 */
	movq	%cr3,%rsi			/* RSI = current CR3 */
	movq	PCB_CR3(%r14),%rcx		/* RCX = desired CR3 */
	cmpq	%rsi,%rcx
	/*je	4f*/
2:
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
	movq	%rcx,%cr3
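	/*
	 * Writing %cr3 flushes all non-global TLB entries, which is the
	 * unconditional invalidation the interlock path above relies on.
	 */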
4:

	/*
	 * NOTE: %rbx is the previous thread and %r12 is the new thread.
	 *	 %rbx is retained throughout so we can return it.
	 *
	 *	 lwkt_switch[_return] is responsible for handling TDF_RUNNING.
	 */

	/*
	 * Deal with the PCB extension, restore the private tss
	 */
	movq	PCB_EXT(%r14),%rdi	/* check for a PCB extension */
	movq	$1,%rcx			/* maybe mark use of a private tss */
	testq	%rdi,%rdi
#if 0 /* JG */
	jnz	2f
#endif

#if 0
	/*
	 * Going back to the common_tss.  (this was already executed at
	 * the top).
	 *
	 * Set the top of the supervisor stack for the new thread
	 * in gd_thread_pcb so the trampoline code can load it into %rsp.
	 */
	movq	%r14, PCPU(trampoline)+TR_PCB_RSP
	movq	PCB_FLAGS(%r14), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_FLAGS
	movq	PCB_CR3_ISO(%r14), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_CR3_ISO
	movq	PCB_CR3(%r14), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_CR3
#endif

#if 0 /* JG */
	cmpl	$0,PCPU(private_tss)	/* don't have to reload if      */
	je	3f			/* already using the common TSS */

	/* JG? */
	subq	%rcx,%rcx		/* unmark use of private tss */

	/*
	 * Get the address of the common TSS descriptor for the ltr.
	 * There is no way to get the address of a segment-accessed variable
	 * so we store a self-referential pointer at the base of the per-cpu
	 * data area and add the appropriate offset.
	 */
	/* JG movl? */
	movq	$gd_common_tssd, %rdi
	/* JG name for "%gs:0"? */
	addq	%gs:0, %rdi

	/*
	 * Move the correct TSS descriptor into the GDT slot, then reload
	 * ltr.
	 */
2:
	/* JG */
	movl	%rcx,PCPU(private_tss)		/* mark/unmark private tss */
	movq	PCPU(tss_gdt), %rbx		/* entry in GDT */
	movq	0(%rdi), %rax
	movq	%rax, 0(%rbx)
	movl	$GPROC0_SEL*8, %esi		/* GSEL(entry, SEL_KPL) */
	ltr	%si
#endif

3:
	/*
	 * Restore the user %gs and %fs
	 */
	movq	PCB_FSBASE(%r14),%r9
	cmpq	PCPU(user_fs),%r9
	je	4f
	movq	%r9,PCPU(user_fs)
	movl	$MSR_FSBASE,%ecx
	movl	PCB_FSBASE(%r14),%eax
	movl	PCB_FSBASE+4(%r14),%edx
	wrmsr
4:
	movq	PCB_GSBASE(%r14),%r9
	cmpq	PCPU(user_gs),%r9
	je	5f
	movq	%r9,PCPU(user_gs)
	movl	$MSR_KGSBASE,%ecx	/* later swapgs moves it to GSBASE */
	movl	PCB_GSBASE(%r14),%eax
	movl	PCB_GSBASE+4(%r14),%edx
	wrmsr
5:
	/*
	 * Actively restore FP state
	 */
	movq	PCPU(npxthread),%r13
	testq	%r13,%r13
	jnz	6f
	movl	TD_FLAGS(%r12),%r13d
	andq	$TDF_USINGFP,%r13
	jz	6f
	movq	%r12,%rdi		/* npxdna_quick(newtd) */
	call	npxdna_quick
6:

	/*
	 * Restore general registers.  %rbx is restored later.
	 *
	 * Switch our PCB register from %r14 to %rdx so we can restore
	 * %r14.
	 */
	movq	%r14,%rdx
	movq	PCB_RSP(%rdx), %rsp
	movq	PCB_RBP(%rdx), %rbp
	movq	PCB_R12(%rdx), %r12
	movq	PCB_R13(%rdx), %r13
	movq	PCB_R14(%rdx), %r14
	movq	PCB_R15(%rdx), %r15
	movq	PCB_RIP(%rdx), %rax
	movq	%rax, (%rsp)
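	/*
	 * The saved PCB_RIP overwrites the return slot at the top of the
	 * restored stack, so the final ret below jumps to whatever address
	 * is in PCB_RIP (normally the return PC captured by
	 * cpu_heavy_switch()).
	 */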
	movw	$KDSEL,%ax
	movw	%ax,%es

#if 0 /* JG */
	/*
	 * Restore the user LDT if we have one
	 */
	cmpl	$0, PCB_USERLDT(%edx)
	jnz	1f
	movl	_default_ldt,%eax
	cmpl	PCPU(currentldt),%eax
	je	2f
	lldt	_default_ldt
	movl	%eax,PCPU(currentldt)
	jmp	2f
1:	pushl	%edx
	call	set_user_ldt
	popl	%edx
2:
#endif
#if 0 /* JG */
	/*
	 * Restore the user TLS if we have one
	 */
	pushl	%edx
	call	set_user_TLS
	popl	%edx
#endif

	/*
	 * Restore the DEBUG register state if necessary.
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	PCB_DR6(%rdx),%rax		/* yes, do the restore */
	movq	%rax,%dr6
	movq	PCB_DR3(%rdx),%rax
	movq	%rax,%dr3
	movq	PCB_DR2(%rdx),%rax
	movq	%rax,%dr2
	movq	PCB_DR1(%rdx),%rax
	movq	%rax,%dr1
	movq	PCB_DR0(%rdx),%rax
	movq	%rax,%dr0
	movq	%dr7,%rax		/* load dr7 so as not to disturb */
	/* JG correct value? */
	andq	$0x0000fc00,%rax	/*   reserved bits               */
	/* JG we've got more registers on x86_64 */
	movq	PCB_DR7(%rdx),%rcx
	/* JG correct value? */
	andq	$~0x0000fc00,%rcx
	orq	%rcx,%rax
	movq	%rax,%dr7

	/*
	 * Clear the QUICKRET flag when restoring a user process context
	 * so we don't try to do a quick syscall return.
	 */
1:
	andl	$~RQF_QUICKRET,PCPU(reqflags)
	movq	%rbx,%rax
	movq	PCB_RBX(%rdx),%rbx
	ret
END(cpu_heavy_restore)

/*
 * savectx(struct pcb *pcb)
 *
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	/* fetch PCB */
	/* JG use %rdi instead of %rcx everywhere? */
	movq	%rdi,%rcx

	/* caller's return address - child won't execute this routine */
	movq	(%rsp),%rax
	movq	%rax,PCB_RIP(%rcx)

	movq	%cr3,%rax
	movq	%rax,PCB_CR3(%rcx)

	movq	%rbx,PCB_RBX(%rcx)
	movq	%rsp,PCB_RSP(%rcx)
	movq	%rbp,PCB_RBP(%rcx)
	movq	%r12,PCB_R12(%rcx)
	movq	%r13,PCB_R13(%rcx)
	movq	%r14,PCB_R14(%rcx)
	movq	%r15,PCB_R15(%rcx)

#if 1
	/*
	 * If npxthread == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxthread != NULL, then we have to save the npx h/w state to
	 * npxthread's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	movq	PCPU(npxthread),%rax
	testq	%rax,%rax
	jz	1f

	pushq	%rcx			/* target pcb */
	movq	TD_SAVEFPU(%rax),%rax	/* originating savefpu area */
	pushq	%rax

	movq	%rax,%rdi
	call	npxsave

	popq	%rax
	popq	%rcx

	movq	$PCB_SAVEFPU_SIZE,%rdx
	leaq	PCB_SAVEFPU(%rcx),%rcx
	movq	%rcx,%rsi
	movq	%rax,%rdi
	call	bcopy
#endif

1:
	ret
END(savectx)

/*
 * cpu_idle_restore()	(current thread in %rax on entry, old thread in %rbx)
 *			(one-time entry)
 *
 *	Don't bother setting up any regs other than %rbp so backtraces
 *	don't die.  This restore function is used to bootstrap into the
 *	cpu_idle() LWKT only; after that cpu_lwkt_*() will be used for
 *	switching.
 *
 *	Clear TDF_RUNNING in old thread only after we've cleaned up %cr3.
 *	This only occurs during system boot so no special handling is
 *	required for migration.
 *
 *	If we are an AP we have to call ap_init() before jumping to
 *	cpu_idle().  ap_init() will synchronize with the BP and finish
 *	setting up various ncpu-dependent globaldata fields.  This may
 *	happen on UP as well as SMP if we happen to be simulating multiple
 *	cpus.
 */
ENTRY(cpu_idle_restore)
	/* cli */
	movq	KPML4phys,%rcx
	xorq	%rbp,%rbp		/* dummy frame pointer */
	pushq	$0			/* dummy return pc */

	/* NOTE: idle thread can never preempt */
	movq	%rcx,%cr3
	cmpl	$0,PCPU(cpuid)
	je	1f
	andl	$~TDF_RUNNING,TD_FLAGS(%rbx)
	orl	$TDF_RUNNING,TD_FLAGS(%rax)	/* manual, no switch_return */
	call	ap_init
	/*
	 * ap_init can decide to enable interrupts early, but otherwise, or if
	 * we are UP, do it here.
	 */
	sti
	jmp	cpu_idle

	/*
	 * cpu 0's idle thread entry for the first time must use normal
	 * lwkt_switch_return() semantics or a pending cpu migration on
	 * thread0 will deadlock.
	 */
1:
	sti
	pushq	%rax
	movq	%rbx,%rdi
	call	lwkt_switch_return
	popq	%rax
	jmp	cpu_idle
END(cpu_idle_restore)

/*
 * cpu_kthread_restore() (current thread in %rax on entry, previous in %rbx)
 *			 (one-time execution)
 *
 *	Don't bother setting up any regs other than %rbp so backtraces
 *	don't die.  This restore function is used to bootstrap into an
 *	LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 *	after this.
 *
 *	Because this switch target does not 'return' to lwkt_switch()
 *	we have to call lwkt_switch_return(otd) to clean up otd.
 *	otd is in %rbx.
 *
 *	Since all of our context is on the stack we are reentrant and
 *	we can release our critical section and enable interrupts early.
 */
ENTRY(cpu_kthread_restore)
	sti
	movq	KPML4phys,%rcx
	movq	TD_PCB(%rax),%r13
	xorq	%rbp,%rbp

#ifdef PREEMPT_OPTIMIZE
	/*
	 * If we are preempting someone we borrow their %cr3; do not
	 * overwrite it!
	 */
	movq	TD_PREEMPTED(%rax),%r14
	testq	%r14,%r14
	jne	1f
#endif
	movq	%rcx,%cr3
1:

	/*
	 * rax and rbx come from the switchout code.  Call
	 * lwkt_switch_return(otd).
	 *
	 * NOTE: unlike i386, %rsi and %rdi are not call-saved regs.
	 */
	pushq	%rax
	movq	%rbx,%rdi
	call	lwkt_switch_return
	popq	%rax
	decl	TD_CRITCOUNT(%rax)
	movq	PCB_R12(%r13),%rdi	/* argument to RBX function */
	movq	PCB_RBX(%r13),%rax	/* thread function */
	/* note: top of stack return address inherited by function */
	jmp	*%rax
END(cpu_kthread_restore)

/*
 * cpu_lwkt_switch(struct thread *)
 *
 *	Standard LWKT switching function.  Only non-scratch registers are
 *	saved and we don't bother with the MMU state or anything else.
 *
 *	This function is always called while in a critical section.
 *
 *	There is a one-instruction window where curthread is the new
 *	thread but %rsp still points to the old thread's stack; we are
 *	protected by a critical section so it is ok.
 */
ENTRY(cpu_lwkt_switch)
	pushq	%rbp	/* JG note: GDB hacked to locate ebp rel to td_sp */
	pushq	%rbx
	movq	PCPU(curthread),%rbx	/* becomes old thread in restore */
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15
	pushfq
	cli

#if 1
	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 *
	 * We have to deal with the FP state for LWKT threads in case they
	 * happen to get preempted or block while doing an optimized
	 * bzero/bcopy/memcpy.
	 */
	cmpq	%rbx,PCPU(npxthread)
	jne	1f
	movq	%rdi,%r12		/* save %rdi. %r12 is callee-saved */
	movq	TD_SAVEFPU(%rbx),%rdi
	call	npxsave			/* do it in a big C function */
	movq	%r12,%rdi		/* restore %rdi */
1:
#endif

	movq	%rdi,%rax		/* switch to this thread */
	pushq	$cpu_lwkt_restore
	movq	%rsp,TD_SP(%rbx)
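	/*
	 * The address of cpu_lwkt_restore now sits on top of the old
	 * thread's saved stack, so a later switch back to this thread
	 * will 'ret' into cpu_lwkt_restore with the flags and registers
	 * pushed above still on the stack for it to pop.
	 */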
	/*
	 * %rax contains new thread, %rbx contains old thread.
	 */
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret
END(cpu_lwkt_switch)

/*
 * cpu_lwkt_restore()	(current thread in %rax on entry)
 *
 * Standard LWKT restore function.  This function is always called
 * while in a critical section.
 *
 * WARNING! Due to preemption the restore function can be used to 'return'
 *	    to the original thread.  Interrupt disablement must be
 *	    protected through the switch so we cannot run splz here.
 */
ENTRY(cpu_lwkt_restore)
#ifdef PREEMPT_OPTIMIZE
	/*
	 * If we are preempting someone we borrow their %cr3 and pmap
	 */
	movq	TD_PREEMPTED(%rax),%r14	/* kernel thread preempting? */
	testq	%r14,%r14
	jne	1f			/* yes, borrow %cr3 from old thread */
#endif
	/*
	 * Don't reload %cr3 if it hasn't changed.  Since this is a LWKT
	 * thread (a kernel thread), and the kernel_pmap always permanently
	 * sets all pm_active bits, we don't have the same problem with it
	 * that we do with process pmaps.
	 */
	movq	KPML4phys,%rcx
	movq	%cr3,%rdx
	cmpq	%rcx,%rdx
	je	1f
	movq	%rcx,%cr3
1:
	/*
	 * NOTE: %rbx is the previous thread and %rax is the new thread.
	 *	 %rbx is retained throughout so we can return it.
	 *
	 *	 lwkt_switch[_return] is responsible for handling TDF_RUNNING.
	 */
	movq	%rbx,%rax
	popfq
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp
	ret
END(cpu_lwkt_restore)