/*
 * Copyright (c) 2003,2004,2008 The DragonFly Project.  All rights reserved.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 */

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/segments.h>

#include <machine/pmap.h>
#if 0 /* JG */
#include <machine_base/apic/apicreg.h>
#endif
#include <machine/lock.h>

#include "assym.s"

#define	MPLOCKED	lock ;

/*
 * PREEMPT_OPTIMIZE
 *
 * This feature allows the preempting (interrupt) kernel thread to borrow
 * %cr3 from the user process it interrupts, allowing us to do away with
 * two %cr3 stores, two atomic ops (pm_active is not modified), and pmap
 * lock tests (not needed since pm_active is not modified).
 *
 * Unfortunately, I couldn't really measure any result so for now the
 * optimization is disabled.
 */
#undef PREEMPT_OPTIMIZE

/*
 * LWP_SWITCH_OPTIMIZE
 *
 * This optimization attempted to avoid a %cr3 store and atomic op, and
 * it might have been useful on older cpus but newer cpus (and more
 * importantly multi-core cpus) generally do not switch between LWPs on
 * the same cpu.  Multiple user threads are more likely to be distributed
 * across multiple cpus.  In cpu-bound situations the scheduler will already
 * be in batch-mode (meaning relatively few context-switches/sec), and
 * otherwise the lwp(s) are likely to be blocked waiting for events.
 *
 * On the flip side, the conditionals this option uses measurably reduce
 * performance (just slightly, honestly).  So this option is disabled.
 */
#undef LWP_SWITCH_OPTIMIZE

	/*
	 * Global Declarations
	 */
	.data

	.globl	panic
	.globl	lwkt_switch_return

#if defined(SWTCH_OPTIM_STATS)
	.globl	swtch_optim_stats, tlb_flush_count
swtch_optim_stats:	.long	0	/* number of _swtch_optims */
tlb_flush_count:	.long	0
#endif

	/*
	 * Code
	 */
	.text

/*
 * cpu_heavy_switch(struct thread *next_thread)
 *
 * Switch from the current thread to a new thread.  This entry
 * is normally called via the thread->td_switch function, and will
 * only be called when the current thread is a heavy weight process.
 *
 * Some instructions have been reordered to reduce pipeline stalls.
 *
 * YYY disable interrupts once giant is removed.
 */
ENTRY(cpu_heavy_switch)
	/*
	 * Save RIP, RSP and callee-saved registers (RBX, RBP, R12-R15).
	 */
	movq	PCPU(curthread),%rcx
	/* On top of the stack is the return address. */
	movq	(%rsp),%rax			/* (reorder optimization) */
	movq	TD_PCB(%rcx),%rdx		/* RDX = PCB */
	movq	%rax,PCB_RIP(%rdx)		/* return PC may be modified */
	movq	%rbx,PCB_RBX(%rdx)
	movq	%rsp,PCB_RSP(%rdx)
	movq	%rbp,PCB_RBP(%rdx)
	movq	%r12,PCB_R12(%rdx)
	movq	%r13,PCB_R13(%rdx)
	movq	%r14,PCB_R14(%rdx)
	movq	%r15,PCB_R15(%rdx)

	/*
	 * Clear the cpu bit in the pmap active mask.  The restore
	 * function will set the bit in the pmap active mask.
	 *
	 * If we are switching away due to a preempt, TD_PREEMPTED(%rdi)
	 * will be non-NULL.  In this situation we do want to avoid extra
	 * atomic ops and %cr3 reloads (see top of file for reasoning).
	 *
	 * NOTE: Do not try to optimize avoiding the %cr3 reload or pm_active
	 *	 adjustment.  This mattered on uni-processor systems but in
	 *	 multi-core systems we are highly unlikely to be switching
	 *	 to another thread belonging to the same process on this cpu.
	 *
	 *	 (more likely the target thread is still sleeping, or if cpu-
	 *	 bound the scheduler is in batch mode and the switch rate is
	 *	 already low).
	 */
	movq	%rcx,%rbx			/* RBX = oldthread */
#ifdef PREEMPT_OPTIMIZE
	/*
	 * If we are being preempted the target thread borrows our %cr3
	 * and we leave our pmap bits intact for the duration.
	 */
	movq	TD_PREEMPTED(%rdi),%r13
	testq	%r13,%r13
	jne	2f
#endif

	movq	TD_LWP(%rcx),%rcx		/* RCX = oldlwp */
	movq	LWP_VMSPACE(%rcx),%rcx		/* RCX = oldvmspace */
#ifdef LWP_SWITCH_OPTIMIZE
	movq	TD_LWP(%rdi),%r13		/* R13 = newlwp */
	testq	%r13,%r13			/* might not be a heavy */
	jz	1f
	cmpq	LWP_VMSPACE(%r13),%rcx		/* same vmspace? */
	je	2f
1:
#endif
	movq	PCPU(cpumask_simple),%rsi
	movq	PCPU(cpumask_offset),%r12
	xorq	$-1,%rsi
	MPLOCKED andq	%rsi, VM_PMAP+PM_ACTIVE(%rcx, %r12, 1)
2:

	/*
	 * Push the LWKT switch restore function, which resumes a heavy
	 * weight process.  Note that the LWKT switcher is based on
	 * TD_SP, while the heavy weight process switcher is based on
	 * PCB_RSP.  TD_SP is usually two ints pushed relative to
	 * PCB_RSP.  We push the flags for later restore by cpu_heavy_restore.
	 */
	pushfq
	cli
	movq	$cpu_heavy_restore, %rax
	pushq	%rax
	movq	%rsp,TD_SP(%rbx)

	/*
	 * Save debug regs if necessary
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	%dr7,%rax			/* yes, do the save */
	movq	%rax,PCB_DR7(%rdx)
	/* JG correct value? */
	andq	$0x0000fc00, %rax		/* disable all watchpoints */
	movq	%rax,%dr7
	movq	%dr6,%rax
	movq	%rax,PCB_DR6(%rdx)
	movq	%dr3,%rax
	movq	%rax,PCB_DR3(%rdx)
	movq	%dr2,%rax
	movq	%rax,PCB_DR2(%rdx)
	movq	%dr1,%rax
	movq	%rax,PCB_DR1(%rdx)
	movq	%dr0,%rax
	movq	%rax,PCB_DR0(%rdx)
1:

	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 */
	cmpq	%rbx,PCPU(npxthread)
	jne	1f
	movq	%rdi,%r12		/* save %rdi.  %r12 is callee-saved */
	movq	TD_SAVEFPU(%rbx),%rdi
	call	npxsave			/* do it in a big C function */
	movq	%r12,%rdi		/* restore %rdi */
1:

	/*
	 * Switch to the next thread, which was passed as an argument
	 * to cpu_heavy_switch().  The argument is in %rdi.
	 * Set the current thread, load the stack pointer,
	 * and 'ret' into the switch-restore function.
	 *
	 * The switch restore function expects the new thread to be in %rax
	 * and the old one to be in %rbx.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	movq	%rdi,%rax		/* RAX = newtd, RBX = oldtd */
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret
END(cpu_heavy_switch)

/*
 * cpu_exit_switch(struct thread *next)
 *
 * The switch function is changed to this when a thread is going away
 * for good.  We have to ensure that the MMU state is not cached, and
 * we don't bother saving the existing thread state before switching.
 *
 * At this point we are in a critical section and this cpu owns the
 * thread's token, which serves as an interlock until the switchout is
 * complete.
 */
ENTRY(cpu_exit_switch)

#ifdef PREEMPT_OPTIMIZE
	/*
	 * If we were preempting we are switching back to the original thread.
	 * In this situation we already have the original thread's %cr3 and
	 * should not replace it!
	 */
	testl	$TDF_PREEMPT_DONE, TD_FLAGS(%rdi)
	jne	1f
#endif

	/*
	 * Get us out of the vmspace
	 */
	movq	KPML4phys,%rcx
	movq	%cr3,%rax
	cmpq	%rcx,%rax
	je	1f

	movq	%rcx,%cr3
1:
	movq	PCPU(curthread),%rbx

	/*
	 * If this is a process/lwp, deactivate the pmap after we've
	 * switched it out.
	 */
	movq	TD_LWP(%rbx),%rcx
	testq	%rcx,%rcx
	jz	2f
	movq	LWP_VMSPACE(%rcx),%rcx		/* RCX = vmspace */

	movq	PCPU(cpumask_simple),%rax
	movq	PCPU(cpumask_offset),%r12
	xorq	$-1,%rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE(%rcx, %r12, 1)
2:
	/*
	 * Switch to the next thread.  RET into the restore function, which
	 * expects the new thread in RAX and the old in RBX.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	cli
	movq	%rdi,%rax
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret
END(cpu_exit_switch)

/*
 * cpu_heavy_restore()	(current thread in %rax on entry, old thread in %rbx)
 *
 * We immediately move %rax to %r12.  %rbx is retained throughout, and
 * we nominally use %r14 for TD_PCB(%r12) until near the end where we
 * switch to %rdx for that.
 *
 * Restore the thread after an LWKT switch.  This entry is normally
 * called via the LWKT switch restore function, which was pulled
 * off the thread stack and jumped to.
 *
 * This entry is only called if the thread was previously saved
 * using cpu_heavy_switch() (the heavy weight process thread switcher),
 * or when a new process is initially scheduled.
 *
 * NOTE: The lwp may be in any state, not necessarily LSRUN, because
 *	 a preemption switch may interrupt the process and then return via
 *	 cpu_heavy_restore.
 *
 * YYY theoretically we do not have to restore everything here, a lot
 * of this junk can wait until we return to usermode.  But for now
 * we restore everything.
 *
 * YYY the PCB crap is really crap, it makes startup a bitch because
 * we can't switch away.
 *
 * YYY note: spl check is done in mi_switch when it splx()'s.
 */

ENTRY(cpu_heavy_restore)
	movq	%rax,%r12			/* R12 = newtd */
	movq	TD_PCB(%rax),%r14		/* R14 = PCB */
	movq	%r14, PCPU(trampoline)+TR_PCB_RSP
	movq	PCB_FLAGS(%r14), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_FLAGS
	movq	PCB_CR3_ISO(%r14), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_CR3_ISO
	movq	PCB_CR3(%r14), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_CR3
	popfq

#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif
#ifdef PREEMPT_OPTIMIZE
	/*
	 * If restoring our thread after a preemption has returned to
	 * us, our %cr3 and pmap were borrowed and are being returned to
	 * us and no further action on those items need be taken.
	 */
	testl	$TDF_PREEMPT_DONE, TD_FLAGS(%r12)
	jne	4f
#endif

	/*
	 * Tell the pmap that our cpu is using the VMSPACE now.  We cannot
	 * safely test/reload %cr3 until after we have set the bit in the
	 * pmap.
	 *
	 * We must do an interlocked test of the CPULOCK_EXCL at the same
	 * time.  If found to be set we will have to wait for it to clear
	 * and then do a forced reload of %cr3 (even if the value matches).
	 *
	 * XXX When switching between two LWPs sharing the same vmspace
	 *     the cpu_heavy_switch() code currently avoids clearing the
	 *     cpu bit in PM_ACTIVE.  So if the bit is already set we can
	 *     avoid checking for the interlock via CPULOCK_EXCL.  We
	 *     currently do not perform this optimization.
	 */
	movq	TD_LWP(%r12),%rcx
	movq	LWP_VMSPACE(%rcx),%rcx		/* RCX = vmspace */

	movq	PCPU(cpumask_simple),%rsi
	movq	PCPU(cpumask_offset),%r13
	MPLOCKED orq	%rsi, VM_PMAP+PM_ACTIVE(%rcx, %r13, 1)

	movl	VM_PMAP+PM_ACTIVE_LOCK(%rcx),%esi
	testl	$CPULOCK_EXCL,%esi
	jz	1f

	movq	%rcx,%rdi			/* (found to be set) */
	call	pmap_interlock_wait		/* pmap_interlock_wait(%rdi:vm) */

	/*
	 * Need unconditional load cr3
	 */
	movq	PCB_CR3(%r14),%rcx		/* RCX = desired CR3 */
	jmp	2f				/* unconditional reload */
1:
	/*
	 * Restore the MMU address space.  If it is the same as the last
	 * thread we don't have to invalidate the tlb (i.e. reload cr3).
	 *
	 * XXX Temporary kludge, do NOT do this optimization!  The problem
	 *     is that the pm_active bit for the cpu had dropped for a small
	 *     period of time, just a few cycles, but even one cycle is long
	 *     enough for some other cpu doing a pmap invalidation to not see
	 *     our cpu.
	 *
	 *     When that happens, and we don't invltlb (by loading %cr3), we
	 *     wind up with a stale TLB.
	 */
	movq	%cr3,%rsi			/* RSI = current CR3 */
	movq	PCB_CR3(%r14),%rcx		/* RCX = desired CR3 */
	cmpq	%rsi,%rcx
	/*je	4f*/
2:
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
	movq	%rcx,%cr3
4:

	/*
	 * NOTE: %rbx is the previous thread and %r12 is the new thread.
	 *	 %rbx is retained throughout so we can return it.
	 *
	 * lwkt_switch[_return] is responsible for handling TDF_RUNNING.
	 */

	/*
	 * Deal with the PCB extension, restore the private tss
	 */
	movq	PCB_EXT(%r14),%rdi	/* check for a PCB extension */
	movq	$1,%rcx			/* maybe mark use of a private tss */
	testq	%rdi,%rdi
#if 0 /* JG */
	jnz	2f
#endif

#if 0
	/*
	 * Going back to the common_tss.  (this was already executed at
	 * the top).
	 *
	 * Set the top of the supervisor stack for the new thread
	 * in gd_thread_pcb so the trampoline code can load it into %rsp.
	 */
	movq	%r14, PCPU(trampoline)+TR_PCB_RSP
	movq	PCB_FLAGS(%r14), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_FLAGS
	movq	PCB_CR3_ISO(%r14), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_CR3_ISO
	movq	PCB_CR3(%r14), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_CR3
#endif

#if 0 /* JG */
	cmpl	$0,PCPU(private_tss)	/* don't have to reload if */
	je	3f			/* already using the common TSS */

	/* JG? */
	subq	%rcx,%rcx		/* unmark use of private tss */

	/*
	 * Get the address of the common TSS descriptor for the ltr.
	 * There is no way to get the address of a segment-accessed variable
	 * so we store a self-referential pointer at the base of the per-cpu
	 * data area and add the appropriate offset.
	 */
	/* JG movl? */
	movq	$gd_common_tssd, %rdi
	/* JG name for "%gs:0"? */
	addq	%gs:0, %rdi

	/*
	 * Move the correct TSS descriptor into the GDT slot, then reload
	 * ltr.
503 */ 5042: 505 /* JG */ 506 movl %rcx,PCPU(private_tss) /* mark/unmark private tss */ 507 movq PCPU(tss_gdt), %rbx /* entry in GDT */ 508 movq 0(%rdi), %rax 509 movq %rax, 0(%rbx) 510 movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */ 511 ltr %si 512#endif 513 5143: 515 /* 516 * Restore the user %gs and %fs 517 */ 518 movq PCB_FSBASE(%r14),%r9 519 cmpq PCPU(user_fs),%r9 520 je 4f 521 movq %r9,PCPU(user_fs) 522 movl $MSR_FSBASE,%ecx 523 movl PCB_FSBASE(%r14),%eax 524 movl PCB_FSBASE+4(%r14),%edx 525 wrmsr 5264: 527 movq PCB_GSBASE(%r14),%r9 528 cmpq PCPU(user_gs),%r9 529 je 5f 530 movq %r9,PCPU(user_gs) 531 movl $MSR_KGSBASE,%ecx /* later swapgs moves it to GSBASE */ 532 movl PCB_GSBASE(%r14),%eax 533 movl PCB_GSBASE+4(%r14),%edx 534 wrmsr 5355: 536 /* 537 * Actively restore FP state 538 */ 539 movq PCPU(npxthread),%r13 540 testq %r13,%r13 541 jnz 6f 542 movl TD_FLAGS(%r12),%r13d 543 andq $TDF_USINGFP,%r13 544 jz 6f 545 movq %r12,%rdi /* npxdna_quick(newtd) */ 546 call npxdna_quick 5476: 548 549 /* 550 * Restore general registers. %rbx is restored later. 551 * 552 * Switch our PCB register from %r14 to %rdx so we can restore 553 * %r14. 554 */ 555 movq %r14,%rdx 556 movq PCB_RSP(%rdx), %rsp 557 movq PCB_RBP(%rdx), %rbp 558 movq PCB_R12(%rdx), %r12 559 movq PCB_R13(%rdx), %r13 560 movq PCB_R14(%rdx), %r14 561 movq PCB_R15(%rdx), %r15 562 movq PCB_RIP(%rdx), %rax 563 movq %rax, (%rsp) 564 movw $KDSEL,%ax 565 movw %ax,%es 566 567#if 0 /* JG */ 568 /* 569 * Restore the user LDT if we have one 570 */ 571 cmpl $0, PCB_USERLDT(%edx) 572 jnz 1f 573 movl _default_ldt,%eax 574 cmpl PCPU(currentldt),%eax 575 je 2f 576 lldt _default_ldt 577 movl %eax,PCPU(currentldt) 578 jmp 2f 5791: pushl %edx 580 call set_user_ldt 581 popl %edx 5822: 583#endif 584#if 0 /* JG */ 585 /* 586 * Restore the user TLS if we have one 587 */ 588 pushl %edx 589 call set_user_TLS 590 popl %edx 591#endif 592 593 /* 594 * Restore the DEBUG register state if necessary. 595 */ 596 movq PCB_FLAGS(%rdx),%rax 597 andq $PCB_DBREGS,%rax 598 jz 1f /* no, skip over */ 599 movq PCB_DR6(%rdx),%rax /* yes, do the restore */ 600 movq %rax,%dr6 601 movq PCB_DR3(%rdx),%rax 602 movq %rax,%dr3 603 movq PCB_DR2(%rdx),%rax 604 movq %rax,%dr2 605 movq PCB_DR1(%rdx),%rax 606 movq %rax,%dr1 607 movq PCB_DR0(%rdx),%rax 608 movq %rax,%dr0 609 movq %dr7,%rax /* load dr7 so as not to disturb */ 610 /* JG correct value? */ 611 andq $0x0000fc00,%rax /* reserved bits */ 612 /* JG we've got more registers on x86_64 */ 613 movq PCB_DR7(%rdx),%rcx 614 /* JG correct value? */ 615 andq $~0x0000fc00,%rcx 616 orq %rcx,%rax 617 movq %rax,%dr7 618 619 /* 620 * Clear the QUICKRET flag when restoring a user process context 621 * so we don't try to do a quick syscall return. 622 */ 6231: 624 andl $~RQF_QUICKRET,PCPU(reqflags) 625 movq %rbx,%rax 626 movq PCB_RBX(%rdx),%rbx 627 ret 628END(cpu_heavy_restore) 629 630/* 631 * savectx(struct pcb *pcb) 632 * 633 * Update pcb, saving current processor state. 634 */ 635ENTRY(savectx) 636 /* fetch PCB */ 637 /* JG use %rdi instead of %rcx everywhere? 
	movq	%rdi,%rcx

	/* caller's return address - child won't execute this routine */
	movq	(%rsp),%rax
	movq	%rax,PCB_RIP(%rcx)

	movq	%cr3,%rax
	movq	%rax,PCB_CR3(%rcx)

	movq	%rbx,PCB_RBX(%rcx)
	movq	%rsp,PCB_RSP(%rcx)
	movq	%rbp,PCB_RBP(%rcx)
	movq	%r12,PCB_R12(%rcx)
	movq	%r13,PCB_R13(%rcx)
	movq	%r14,PCB_R14(%rcx)
	movq	%r15,PCB_R15(%rcx)

#if 1
	/*
	 * If npxthread == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxthread != NULL, then we have to save the npx h/w state to
	 * npxthread's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	movq	PCPU(npxthread),%rax
	testq	%rax,%rax
	jz	1f

	pushq	%rcx			/* target pcb */
	movq	TD_SAVEFPU(%rax),%rax	/* originating savefpu area */
	pushq	%rax

	movq	%rax,%rdi
	call	npxsave

	popq	%rax
	popq	%rcx

	movq	$PCB_SAVEFPU_SIZE,%rdx
	leaq	PCB_SAVEFPU(%rcx),%rcx
	movq	%rcx,%rsi
	movq	%rax,%rdi
	call	bcopy
#endif

1:
	ret
END(savectx)

/*
 * cpu_idle_restore()	(current thread in %rax on entry, old thread in %rbx)
 *			(one-time entry)
 *
 * Don't bother setting up any regs other than %rbp so backtraces
 * don't die.  This restore function is used to bootstrap into the
 * cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 * switching.
 *
 * Clear TDF_RUNNING in old thread only after we've cleaned up %cr3.
 * This only occurs during system boot so no special handling is
 * required for migration.
 *
 * If we are an AP we have to call ap_init() before jumping to
 * cpu_idle().  ap_init() will synchronize with the BP and finish
 * setting up various ncpu-dependent globaldata fields.  This may
 * happen on UP as well as SMP if we happen to be simulating multiple
 * cpus.
 */
ENTRY(cpu_idle_restore)
	/* cli */
	movq	KPML4phys,%rcx
	xorq	%rbp,%rbp			/* dummy frame pointer */
	pushq	$0				/* dummy return pc */

	/* NOTE: idle thread can never preempt */
	movq	%rcx,%cr3
	cmpl	$0,PCPU(cpuid)
	je	1f
	andl	$~TDF_RUNNING,TD_FLAGS(%rbx)
	orl	$TDF_RUNNING,TD_FLAGS(%rax)	/* manual, no switch_return */
	call	ap_init
	/*
	 * ap_init can decide to enable interrupts early, but otherwise, or if
	 * we are UP, do it here.
	 */
	sti
	jmp	cpu_idle

	/*
	 * cpu 0's idle thread entry for the first time must use normal
	 * lwkt_switch_return() semantics or a pending cpu migration on
	 * thread0 will deadlock.
	 */
1:
	sti
	pushq	%rax
	movq	%rbx,%rdi
	call	lwkt_switch_return
	popq	%rax
	jmp	cpu_idle
END(cpu_idle_restore)

/*
 * cpu_kthread_restore()	(current thread is %rax on entry, previous is %rbx)
 *				(one-time execution)
 *
 * Don't bother setting up any regs other than %rbp so backtraces
 * don't die.  This restore function is used to bootstrap into an
 * LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 * after this.
 *
 * Because this switch target does not 'return' to lwkt_switch()
 * we have to call lwkt_switch_return(otd) to clean up otd.
 * otd is in %rbx.
 *
 * Since all of our context is on the stack we are reentrant and
 * we can release our critical section and enable interrupts early.
 */
ENTRY(cpu_kthread_restore)
	sti
	movq	KPML4phys,%rcx
	movq	TD_PCB(%rax),%r13
	xorq	%rbp,%rbp

#ifdef PREEMPT_OPTIMIZE
	/*
	 * If we are preempting someone we borrow their %cr3, do not overwrite
	 * it!
	 */
	movq	TD_PREEMPTED(%rax),%r14
	testq	%r14,%r14
	jne	1f
#endif
	movq	%rcx,%cr3
1:

	/*
	 * rax and rbx come from the switchout code.  Call
	 * lwkt_switch_return(otd).
	 *
	 * NOTE: unlike i386, %rsi and %rdi are not call-saved regs.
	 */
	pushq	%rax
	movq	%rbx,%rdi
	call	lwkt_switch_return
	popq	%rax
	decl	TD_CRITCOUNT(%rax)
	movq	PCB_R12(%r13),%rdi	/* argument to RBX function */
	movq	PCB_RBX(%r13),%rax	/* thread function */
	/* note: top of stack return address inherited by function */
	jmp	*%rax
END(cpu_kthread_restore)

/*
 * cpu_lwkt_switch(struct thread *)
 *
 * Standard LWKT switching function.  Only non-scratch registers are
 * saved and we don't bother with the MMU state or anything else.
 *
 * This function is always called while in a critical section.
 *
 * There is a one-instruction window where curthread is the new
 * thread but %rsp still points to the old thread's stack, but
 * we are protected by a critical section so it is ok.
 */
ENTRY(cpu_lwkt_switch)
	pushq	%rbp		/* JG note: GDB hacked to locate ebp rel to td_sp */
	pushq	%rbx
	movq	PCPU(curthread),%rbx	/* becomes old thread in restore */
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15
	pushfq
	cli

#if 1
	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 *
	 * We have to deal with the FP state for LWKT threads in case they
	 * happen to get preempted or block while doing an optimized
	 * bzero/bcopy/memcpy.
	 */
	cmpq	%rbx,PCPU(npxthread)
	jne	1f
	movq	%rdi,%r12		/* save %rdi.  %r12 is callee-saved */
	movq	TD_SAVEFPU(%rbx),%rdi
	call	npxsave			/* do it in a big C function */
	movq	%r12,%rdi		/* restore %rdi */
1:
#endif

	movq	%rdi,%rax		/* switch to this thread */
	pushq	$cpu_lwkt_restore
	movq	%rsp,TD_SP(%rbx)
	/*
	 * %rax contains new thread, %rbx contains old thread.
	 */
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret
END(cpu_lwkt_switch)

/*
 * cpu_lwkt_restore()	(current thread in %rax on entry)
 *
 * Standard LWKT restore function.  This function is always called
 * while in a critical section.
 *
 * WARNING! Due to preemption the restore function can be used to 'return'
 *	    to the original thread.  Interrupt disablement must be
 *	    protected through the switch so we cannot run splz here.
 */
ENTRY(cpu_lwkt_restore)
#ifdef PREEMPT_OPTIMIZE
	/*
	 * If we are preempting someone we borrow their %cr3 and pmap
	 */
	movq	TD_PREEMPTED(%rax),%r14	/* kernel thread preempting? */
	testq	%r14,%r14
	jne	1f			/* yes, borrow %cr3 from old thread */
#endif
	/*
	 * Don't reload %cr3 if it hasn't changed.  Since this is an LWKT
	 * thread (a kernel thread), and the kernel_pmap always permanently
	 * sets all pm_active bits, we don't have the same problem with it
	 * that we do with process pmaps.
	 */
	movq	KPML4phys,%rcx
	movq	%cr3,%rdx
	cmpq	%rcx,%rdx
	je	1f
	movq	%rcx,%cr3
1:
	/*
	 * NOTE: %rbx is the previous thread and %rax is the new thread.
	 *	 %rbx is retained throughout so we can return it.
	 *
	 * lwkt_switch[_return] is responsible for handling TDF_RUNNING.
	 */
	movq	%rbx,%rax
	popfq
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp
	ret
END(cpu_lwkt_restore)