/*
 * Copyright (c) 2003,2004,2008 The DragonFly Project. All rights reserved.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 */

//#include "use_npx.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/segments.h>

#include <machine/pmap.h>
#if 0 /* JG */
#include <machine_base/apic/apicreg.h>
#endif
#include <machine/lock.h>

#include "assym.s"

#define	MPLOCKED	lock ;

/*
 * PREEMPT_OPTIMIZE
 *
 * This feature allows the preempting (interrupt) kernel thread to borrow
 * %cr3 from the user process it interrupts, allowing us to do away with
 * two %cr3 stores, two atomic ops (pm_active is not modified), and pmap
 * lock tests (not needed since pm_active is not modified).
 *
 * Unfortunately, I couldn't really measure any result so for now the
 * optimization is disabled.
 */
#undef PREEMPT_OPTIMIZE

/*
 * LWP_SWITCH_OPTIMIZE
 *
 * This optimization attempted to avoid a %cr3 store and atomic op, and
 * it might have been useful on older cpus but newer cpus (and more
 * importantly multi-core cpus) generally do not switch between LWPs on
 * the same cpu. Multiple user threads are more likely to be distributed
 * across multiple cpus. In cpu-bound situations the scheduler will already
 * be in batch-mode (meaning relatively few context-switches/sec), and
 * otherwise the lwp(s) are likely to be blocked waiting for events.
 *
 * On the flip side, the conditionals this option uses measurably reduce
 * performance (just slightly, honestly). So this option is disabled.
 */
#undef LWP_SWITCH_OPTIMIZE

	/*
	 * Global Declarations
	 */
	.data

	.globl	panic
	.globl	lwkt_switch_return

#if defined(SWTCH_OPTIM_STATS)
	.globl	swtch_optim_stats, tlb_flush_count
swtch_optim_stats:	.long	0		/* number of _swtch_optims */
tlb_flush_count:	.long	0
#endif

	/*
	 * Code
	 */
	.text

/*
 * cpu_heavy_switch(struct thread *next_thread)
 *
 * Switch from the current thread to a new thread. This entry
 * is normally called via the thread->td_switch function, and will
 * only be called when the current thread is a heavy weight process.
 *
 * Some instructions have been reordered to reduce pipeline stalls.
 *
 * YYY disable interrupts once giant is removed.
 */
ENTRY(cpu_heavy_switch)
	/*
	 * Save RIP, RSP and callee-saved registers (RBX, RBP, R12-R15).
	 */
	movq	PCPU(curthread),%rcx
	/* On top of the stack is the return address. */
	movq	(%rsp),%rax			/* (reorder optimization) */
	movq	TD_PCB(%rcx),%rdx		/* RDX = PCB */
	movq	%rax,PCB_RIP(%rdx)		/* return PC may be modified */
	movq	%rbx,PCB_RBX(%rdx)
	movq	%rsp,PCB_RSP(%rdx)
	movq	%rbp,PCB_RBP(%rdx)
	movq	%r12,PCB_R12(%rdx)
	movq	%r13,PCB_R13(%rdx)
	movq	%r14,PCB_R14(%rdx)
	movq	%r15,PCB_R15(%rdx)

	/*
	 * Clear the cpu bit in the pmap active mask. The restore
	 * function will set the bit in the pmap active mask.
	 *
	 * If we are switching away due to a preempt, TD_PREEMPTED(%rdi)
	 * will be non-NULL. In this situation we do want to avoid extra
	 * atomic ops and %cr3 reloads (see top of file for reasoning).
	 *
	 * NOTE: Do not try to optimize avoiding the %cr3 reload or pm_active
	 *	 adjustment. This mattered on uni-processor systems but in
	 *	 multi-core systems we are highly unlikely to be switching
	 *	 to another thread belonging to the same process on this cpu.
	 *
	 *	 (more likely the target thread is still sleeping, or if cpu-
	 *	 bound the scheduler is in batch mode and the switch rate is
	 *	 already low).
	 */
	movq	%rcx,%rbx			/* RBX = oldthread */
#ifdef PREEMPT_OPTIMIZE
	/*
	 * If we are being preempted the target thread borrows our %cr3
	 * and we leave our pmap bits intact for the duration.
	 */
	movq	TD_PREEMPTED(%rdi),%r13
	testq	%r13,%r13
	jne	2f
#endif

	movq	TD_LWP(%rcx),%rcx		/* RCX = oldlwp */
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = oldvmspace */
#ifdef LWP_SWITCH_OPTIMIZE
	movq	TD_LWP(%rdi),%r13		/* R13 = newlwp */
	testq	%r13,%r13			/* might not be a heavy */
	jz	1f
	cmpq	LWP_VMSPACE(%r13),%rcx		/* same vmspace? */
	je	2f
1:
#endif
	movq	PCPU(cpumask_simple),%rsi
	movq	PCPU(cpumask_offset),%r12
	xorq	$-1,%rsi
	MPLOCKED andq	%rsi, VM_PMAP+PM_ACTIVE(%rcx, %r12, 1)
2:

	/*
	 * Push the LWKT switch restore function, which resumes a heavy
	 * weight process. Note that the LWKT switcher is based on
	 * TD_SP, while the heavy weight process switcher is based on
	 * PCB_RSP. TD_SP is usually two ints pushed relative to
	 * PCB_RSP. We push the flags for later restore by cpu_heavy_restore.
	 */
	pushfq
	cli
	movq	$cpu_heavy_restore, %rax
	pushq	%rax
	movq	%rsp,TD_SP(%rbx)

	/*
	 * Save debug regs if necessary
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	%dr7,%rax			/* yes, do the save */
	movq	%rax,PCB_DR7(%rdx)
	/* JG correct value? */
	andq	$0x0000fc00, %rax		/* disable all watchpoints */
	movq	%rax,%dr7
	movq	%dr6,%rax
	movq	%rax,PCB_DR6(%rdx)
	movq	%dr3,%rax
	movq	%rax,PCB_DR3(%rdx)
	movq	%dr2,%rax
	movq	%rax,PCB_DR2(%rdx)
	movq	%dr1,%rax
	movq	%rax,PCB_DR1(%rdx)
	movq	%dr0,%rax
	movq	%rax,PCB_DR0(%rdx)
1:

#if 1
	/*
	 * Save the FP state if we have used the FP. Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 */
	cmpq	%rbx,PCPU(npxthread)
	jne	1f
	movq	%rdi,%r12			/* save %rdi. %r12 is callee-saved */
	movq	TD_SAVEFPU(%rbx),%rdi
	call	npxsave				/* do it in a big C function */
	movq	%r12,%rdi			/* restore %rdi */
1:
#endif

	/*
	 * Switch to the next thread, which was passed as an argument
	 * to cpu_heavy_switch(). The argument is in %rdi.
	 * Set the current thread, load the stack pointer,
	 * and 'ret' into the switch-restore function.
	 *
	 * The switch restore function expects the new thread to be in %rax
	 * and the old one to be in %rbx.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	movq	%rdi,%rax			/* RAX = newtd, RBX = oldtd */
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret
END(cpu_heavy_switch)

/*
 * cpu_exit_switch(struct thread *next)
 *
 * The switch function is changed to this when a thread is going away
 * for good. We have to ensure that the MMU state is not cached, and
 * we don't bother saving the existing thread state before switching.
 *
 * At this point we are in a critical section and this cpu owns the
 * thread's token, which serves as an interlock until the switchout is
 * complete.
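 *
 * In rough C terms, the body below does something like the following
 * (an illustrative sketch only; the names mirror the assym.s offsets
 * used in the assembly, not the literal kernel C code):
 *
 *	if (rcr3() != KPML4phys)
 *		load_cr3(KPML4phys);	// get out of the dying vmspace
 *	if ((lp = curthread->td_lwp) != NULL) {
 *		vm = lp->lwp_vmspace;	// deactivate the pmap
 *		atomic_clear_long(&vm->vm_pmap.pm_active[off], cpumask);
 *	}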
 */
ENTRY(cpu_exit_switch)

#ifdef PREEMPT_OPTIMIZE
	/*
	 * If we were preempting we are switching back to the original thread.
	 * In this situation we already have the original thread's %cr3 and
	 * should not replace it!
	 */
	testl	$TDF_PREEMPT_DONE, TD_FLAGS(%rdi)
	jne	1f
#endif

	/*
	 * Get us out of the vmspace
	 */
	movq	KPML4phys,%rcx
	movq	%cr3,%rax
	cmpq	%rcx,%rax
	je	1f

	movq	%rcx,%cr3
1:
	movq	PCPU(curthread),%rbx

	/*
	 * If this is a process/lwp, deactivate the pmap after we've
	 * switched it out.
	 */
	movq	TD_LWP(%rbx),%rcx
	testq	%rcx,%rcx
	jz	2f
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = vmspace */

	movq	PCPU(cpumask_simple),%rax
	movq	PCPU(cpumask_offset),%r12
	xorq	$-1,%rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE(%rcx, %r12, 1)
2:
	/*
	 * Switch to the next thread. RET into the restore function, which
	 * expects the new thread in RAX and the old in RBX.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	cli
	movq	%rdi,%rax
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret
END(cpu_exit_switch)

/*
 * cpu_heavy_restore()	(current thread in %rax on entry, old thread in %rbx)
 *
 * Restore the thread after an LWKT switch. This entry is normally
 * called via the LWKT switch restore function, which was pulled
 * off the thread stack and jumped to.
 *
 * This entry is only called if the thread was previously saved
 * using cpu_heavy_switch() (the heavy weight process thread switcher),
 * or when a new process is initially scheduled.
 *
 * NOTE: The lwp may be in any state, not necessarily LSRUN, because
 * a preemption switch may interrupt the process and then return via
 * cpu_heavy_restore.
 *
 * YYY theoretically we do not have to restore everything here, a lot
 * of this junk can wait until we return to usermode. But for now
 * we restore everything.
 *
 * YYY the PCB crap is really crap, it makes startup a bitch because
 * we can't switch away.
 *
 * YYY note: spl check is done in mi_switch when it splx()'s.
 */

ENTRY(cpu_heavy_restore)
	movq	TD_PCB(%rax),%rdx		/* RDX = PCB */
	movq	%rdx, PCPU(trampoline)+TR_PCB_RSP
	movq	PCB_FLAGS(%rdx), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_FLAGS
	movq	PCB_CR3_ISO(%rdx), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_CR3_ISO
	movq	PCB_CR3(%rdx), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_CR3
	popfq

#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif
#ifdef PREEMPT_OPTIMIZE
	/*
	 * If restoring our thread after a preemption has returned to
	 * us, our %cr3 and pmap were borrowed and are being returned to
	 * us and no further action on those items need be taken.
	 */
	testl	$TDF_PREEMPT_DONE, TD_FLAGS(%rax)
	jne	4f
#endif

	/*
	 * Tell the pmap that our cpu is using the VMSPACE now. We cannot
	 * safely test/reload %cr3 until after we have set the bit in the
	 * pmap.
	 *
	 * We must do an interlocked test of the CPULOCK_EXCL at the same
	 * time. If found to be set we will have to wait for it to clear
	 * and then do a forced reload of %cr3 (even if the value matches).
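	 *
	 * In rough C terms (an illustrative sketch only; the names mirror
	 * the assym.s offsets used below, not the literal kernel C code):
	 *
	 *	atomic_set_long(&vm->vm_pmap.pm_active[off], cpumask);
	 *	if (vm->vm_pmap.pm_active_lock & CPULOCK_EXCL) {
	 *		pmap_interlock_wait(vm);
	 *		// then fall through to an unconditional %cr3 reload
	 *	}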
	 *
	 * XXX When switching between two LWPs sharing the same vmspace
	 *     the cpu_heavy_switch() code currently avoids clearing the
	 *     cpu bit in PM_ACTIVE. So if the bit is already set we can
	 *     avoid checking for the interlock via CPULOCK_EXCL. We currently
	 *     do not perform this optimization.
	 */
	movq	TD_LWP(%rax),%rcx
	movq	LWP_VMSPACE(%rcx),%rcx		/* RCX = vmspace */

	movq	PCPU(cpumask_simple),%rsi
	movq	PCPU(cpumask_offset),%r12
	MPLOCKED orq	%rsi, VM_PMAP+PM_ACTIVE(%rcx, %r12, 1)

	movl	VM_PMAP+PM_ACTIVE_LOCK(%rcx),%esi
	testl	$CPULOCK_EXCL,%esi
	jz	1f

	movq	%rax,%r12			/* save newthread ptr */
	movq	%rcx,%rdi			/* (found to be set) */
	call	pmap_interlock_wait		/* pmap_interlock_wait(%rdi:vm) */
	movq	%r12,%rax

	/*
	 * Need unconditional load cr3
	 */
	movq	TD_PCB(%rax),%rdx		/* RDX = PCB */
	movq	PCB_CR3(%rdx),%rcx		/* RCX = desired CR3 */
	jmp	2f				/* unconditional reload */
1:
	/*
	 * Restore the MMU address space. If it is the same as the last
	 * thread we don't have to invalidate the tlb (i.e. reload cr3).
	 *
	 * XXX Temporary kludge, do NOT do this optimization! The problem
	 *     is that the pm_active bit for the cpu had dropped for a small
	 *     period of time, just a few cycles, but even one cycle is long
	 *     enough for some other cpu doing a pmap invalidation to not see
	 *     our cpu.
	 *
	 *     When that happens, and we don't invltlb (by loading %cr3), we
	 *     wind up with a stale TLB.
	 */
	movq	TD_PCB(%rax),%rdx		/* RDX = PCB */
	movq	%cr3,%rsi			/* RSI = current CR3 */
	movq	PCB_CR3(%rdx),%rcx		/* RCX = desired CR3 */
	cmpq	%rsi,%rcx
	/*je	4f*/
2:
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
	movq	%rcx,%cr3
4:

	/*
	 * NOTE: %rbx is the previous thread and %rax is the new thread.
	 *	 %rbx is retained throughout so we can return it.
	 *
	 *	 lwkt_switch[_return] is responsible for handling TDF_RUNNING.
	 */

	/*
	 * Deal with the PCB extension, restore the private tss
	 */
	movq	PCB_EXT(%rdx),%rdi		/* check for a PCB extension */
	movq	$1,%rcx				/* maybe mark use of a private tss */
	testq	%rdi,%rdi
#if 0 /* JG */
	jnz	2f
#endif

#if 0
	/*
	 * Going back to the common_tss. (this was already executed at
	 * the top).
	 *
	 * Set the top of the supervisor stack for the new thread
	 * in gd_thread_pcb so the trampoline code can load it into %rsp.
	 */
	movq	%rdx, PCPU(trampoline)+TR_PCB_RSP
	movq	PCB_FLAGS(%rdx), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_FLAGS
	movq	PCB_CR3_ISO(%rdx), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_CR3_ISO
	movq	PCB_CR3(%rdx), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_CR3
#endif

#if 0 /* JG */
	cmpl	$0,PCPU(private_tss)		/* don't have to reload if */
	je	3f				/* already using the common TSS */

	/* JG? */
	subq	%rcx,%rcx			/* unmark use of private tss */

	/*
	 * Get the address of the common TSS descriptor for the ltr.
	 * There is no way to get the address of a segment-accessed variable
	 * so we store a self-referential pointer at the base of the per-cpu
	 * data area and add the appropriate offset.
	 */
	/* JG movl? */
	movq	$gd_common_tssd, %rdi
	/* JG name for "%gs:0"? */
	addq	%gs:0, %rdi

	/*
	 * Move the correct TSS descriptor into the GDT slot, then reload
	 * ltr.
	 */
2:
	/* JG */
	movl	%rcx,PCPU(private_tss)		/* mark/unmark private tss */
	movq	PCPU(tss_gdt), %rbx		/* entry in GDT */
	movq	0(%rdi), %rax
	movq	%rax, 0(%rbx)
	movl	$GPROC0_SEL*8, %esi		/* GSEL(entry, SEL_KPL) */
	ltr	%si
#endif

3:
	/*
	 * Restore the user %gs and %fs
	 */
	movq	PCB_FSBASE(%rdx),%r9
	cmpq	PCPU(user_fs),%r9
	je	4f
	movq	%rdx,%r10
	movq	%r9,PCPU(user_fs)
	movl	$MSR_FSBASE,%ecx
	movl	PCB_FSBASE(%r10),%eax
	movl	PCB_FSBASE+4(%r10),%edx
	wrmsr
	movq	%r10,%rdx
4:
	movq	PCB_GSBASE(%rdx),%r9
	cmpq	PCPU(user_gs),%r9
	je	5f
	movq	%rdx,%r10
	movq	%r9,PCPU(user_gs)
	movl	$MSR_KGSBASE,%ecx		/* later swapgs moves it to GSBASE */
	movl	PCB_GSBASE(%r10),%eax
	movl	PCB_GSBASE+4(%r10),%edx
	wrmsr
	movq	%r10,%rdx
5:

	/*
	 * Restore general registers. %rbx is restored later.
	 */
	movq	PCB_RSP(%rdx), %rsp
	movq	PCB_RBP(%rdx), %rbp
	movq	PCB_R12(%rdx), %r12
	movq	PCB_R13(%rdx), %r13
	movq	PCB_R14(%rdx), %r14
	movq	PCB_R15(%rdx), %r15
	movq	PCB_RIP(%rdx), %rax
	movq	%rax, (%rsp)
	movw	$KDSEL,%ax
	movw	%ax,%es

#if 0 /* JG */
	/*
	 * Restore the user LDT if we have one
	 */
	cmpl	$0, PCB_USERLDT(%edx)
	jnz	1f
	movl	_default_ldt,%eax
	cmpl	PCPU(currentldt),%eax
	je	2f
	lldt	_default_ldt
	movl	%eax,PCPU(currentldt)
	jmp	2f
1:	pushl	%edx
	call	set_user_ldt
	popl	%edx
2:
#endif
#if 0 /* JG */
	/*
	 * Restore the user TLS if we have one
	 */
	pushl	%edx
	call	set_user_TLS
	popl	%edx
#endif

	/*
	 * Restore the DEBUG register state if necessary.
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	PCB_DR6(%rdx),%rax		/* yes, do the restore */
	movq	%rax,%dr6
	movq	PCB_DR3(%rdx),%rax
	movq	%rax,%dr3
	movq	PCB_DR2(%rdx),%rax
	movq	%rax,%dr2
	movq	PCB_DR1(%rdx),%rax
	movq	%rax,%dr1
	movq	PCB_DR0(%rdx),%rax
	movq	%rax,%dr0
	movq	%dr7,%rax			/* load dr7 so as not to disturb */
	/* JG correct value? */
	andq	$0x0000fc00,%rax		/* reserved bits */
	/* JG we've got more registers on x86_64 */
	movq	PCB_DR7(%rdx),%rcx
	/* JG correct value? */
	andq	$~0x0000fc00,%rcx
	orq	%rcx,%rax
	movq	%rax,%dr7

	/*
	 * Clear the QUICKRET flag when restoring a user process context
	 * so we don't try to do a quick syscall return.
	 */
1:
	andl	$~RQF_QUICKRET,PCPU(reqflags)
	movq	%rbx,%rax
	movq	PCB_RBX(%rdx),%rbx
	ret
END(cpu_heavy_restore)

/*
 * savectx(struct pcb *pcb)
 *
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	/* fetch PCB */
	/* JG use %rdi instead of %rcx everywhere? */
	movq	%rdi,%rcx

	/* caller's return address - child won't execute this routine */
	movq	(%rsp),%rax
	movq	%rax,PCB_RIP(%rcx)

	movq	%cr3,%rax
	movq	%rax,PCB_CR3(%rcx)

	movq	%rbx,PCB_RBX(%rcx)
	movq	%rsp,PCB_RSP(%rcx)
	movq	%rbp,PCB_RBP(%rcx)
	movq	%r12,PCB_R12(%rcx)
	movq	%r13,PCB_R13(%rcx)
	movq	%r14,PCB_R14(%rcx)
	movq	%r15,PCB_R15(%rcx)

#if 1
	/*
	 * If npxthread == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb. This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxthread != NULL, then we have to save the npx h/w state to
	 * npxthread's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload. Copying is easier because we would
	 * have to handle h/w bugs for reloading. We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	movq	PCPU(npxthread),%rax
	testq	%rax,%rax
	jz	1f

	pushq	%rcx				/* target pcb */
	movq	TD_SAVEFPU(%rax),%rax		/* originating savefpu area */
	pushq	%rax

	movq	%rax,%rdi
	call	npxsave

	popq	%rax
	popq	%rcx

	movq	$PCB_SAVEFPU_SIZE,%rdx
	leaq	PCB_SAVEFPU(%rcx),%rcx
	movq	%rcx,%rsi
	movq	%rax,%rdi
	call	bcopy
#endif

1:
	ret
END(savectx)

/*
 * cpu_idle_restore()	(current thread in %rax on entry, old thread in %rbx)
 *			(one-time entry)
 *
 * Don't bother setting up any regs other than %rbp so backtraces
 * don't die. This restore function is used to bootstrap into the
 * cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 * switching.
 *
 * Clear TDF_RUNNING in old thread only after we've cleaned up %cr3.
 * This only occurs during system boot so no special handling is
 * required for migration.
 *
 * If we are an AP we have to call ap_init() before jumping to
 * cpu_idle(). ap_init() will synchronize with the BP and finish
 * setting up various ncpu-dependent globaldata fields. This may
 * happen on UP as well as SMP if we happen to be simulating multiple
 * cpus.
 */
ENTRY(cpu_idle_restore)
	/* cli */
	movq	KPML4phys,%rcx
	xorq	%rbp,%rbp			/* dummy frame pointer */
	pushq	$0				/* dummy return pc */

	/* NOTE: idle thread can never preempt */
	movq	%rcx,%cr3
	cmpl	$0,PCPU(cpuid)
	je	1f
	andl	$~TDF_RUNNING,TD_FLAGS(%rbx)
	orl	$TDF_RUNNING,TD_FLAGS(%rax)	/* manual, no switch_return */
	call	ap_init
	/*
	 * ap_init can decide to enable interrupts early, but otherwise, or if
	 * we are UP, do it here.
	 */
	sti
	jmp	cpu_idle

	/*
	 * cpu 0's idle thread entry for the first time must use normal
	 * lwkt_switch_return() semantics or a pending cpu migration on
	 * thread0 will deadlock.
	 */
1:
	sti
	pushq	%rax
	movq	%rbx,%rdi
	call	lwkt_switch_return
	popq	%rax
	jmp	cpu_idle
END(cpu_idle_restore)

/*
 * cpu_kthread_restore()	(current thread in %rax on entry, previous in %rbx)
 *				(one-time execution)
 *
 * Don't bother setting up any regs other than %rbp so backtraces
 * don't die. This restore function is used to bootstrap into an
 * LWKT based kernel thread only. cpu_lwkt_switch() will be used
 * after this.
 *
 * Because this switch target does not 'return' to lwkt_switch()
 * we have to call lwkt_switch_return(otd) to clean up otd.
 * otd is in %rbx.
 *
 * Since all of our context is on the stack we are reentrant and
 * we can release our critical section and enable interrupts early.
 */
ENTRY(cpu_kthread_restore)
	sti
	movq	KPML4phys,%rcx
	movq	TD_PCB(%rax),%r13
	xorq	%rbp,%rbp

#ifdef PREEMPT_OPTIMIZE
	/*
	 * If we are preempting someone we borrow their %cr3, do not overwrite
	 * it!
	 */
	movq	TD_PREEMPTED(%rax),%r14
	testq	%r14,%r14
	jne	1f
#endif
	movq	%rcx,%cr3
1:

	/*
	 * rax and rbx come from the switchout code. Call
	 * lwkt_switch_return(otd).
	 *
	 * NOTE: unlike i386, the %rsi and %rdi are not call-saved regs.
	 */
	pushq	%rax
	movq	%rbx,%rdi
	call	lwkt_switch_return
	popq	%rax
	decl	TD_CRITCOUNT(%rax)
	movq	PCB_R12(%r13),%rdi		/* argument to RBX function */
	movq	PCB_RBX(%r13),%rax		/* thread function */
	/* note: top of stack return address inherited by function */
	jmp	*%rax
END(cpu_kthread_restore)

/*
 * cpu_lwkt_switch(struct thread *)
 *
 * Standard LWKT switching function. Only non-scratch registers are
 * saved and we don't bother with the MMU state or anything else.
 *
 * This function is always called while in a critical section.
 *
 * There is a one-instruction window where curthread is the new
 * thread but %rsp still points to the old thread's stack, but
 * we are protected by a critical section so it is ok.
 */
ENTRY(cpu_lwkt_switch)
	pushq	%rbp		/* JG note: GDB hacked to locate ebp rel to td_sp */
	pushq	%rbx
	movq	PCPU(curthread),%rbx	/* becomes old thread in restore */
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15
	pushfq
	cli

#if 1
	/*
	 * Save the FP state if we have used the FP. Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 *
	 * We have to deal with the FP state for LWKT threads in case they
	 * happen to get preempted or block while doing an optimized
	 * bzero/bcopy/memcpy.
	 */
	cmpq	%rbx,PCPU(npxthread)
	jne	1f
	movq	%rdi,%r12		/* save %rdi. %r12 is callee-saved */
	movq	TD_SAVEFPU(%rbx),%rdi
	call	npxsave			/* do it in a big C function */
	movq	%r12,%rdi		/* restore %rdi */
1:
#endif

	movq	%rdi,%rax		/* switch to this thread */
	pushq	$cpu_lwkt_restore
	movq	%rsp,TD_SP(%rbx)
	/*
	 * %rax contains new thread, %rbx contains old thread.
	 */
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret
END(cpu_lwkt_switch)

/*
 * cpu_lwkt_restore()	(current thread in %rax on entry)
 *
 * Standard LWKT restore function. This function is always called
 * while in a critical section.
 *
 * WARNING! Due to preemption the restore function can be used to 'return'
 *	    to the original thread. Interrupt disablement must be
 *	    protected through the switch so we cannot run splz here.
 */
ENTRY(cpu_lwkt_restore)
#ifdef PREEMPT_OPTIMIZE
	/*
	 * If we are preempting someone we borrow their %cr3 and pmap
	 */
	movq	TD_PREEMPTED(%rax),%r14	/* kernel thread preempting? */
	testq	%r14,%r14
	jne	1f			/* yes, borrow %cr3 from old thread */
#endif
	/*
	 * Don't reload %cr3 if it hasn't changed. Since this is a LWKT
	 * thread (a kernel thread), and the kernel_pmap always permanently
	 * sets all pm_active bits, we don't have the same problem with it
	 * that we do with process pmaps.
	 */
	movq	KPML4phys,%rcx
	movq	%cr3,%rdx
	cmpq	%rcx,%rdx
	je	1f
	movq	%rcx,%cr3
1:
	/*
	 * NOTE: %rbx is the previous thread and %rax is the new thread.
	 *	 %rbx is retained throughout so we can return it.
	 *
	 *	 lwkt_switch[_return] is responsible for handling TDF_RUNNING.
	 */
	movq	%rbx,%rax
	popfq
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp
	ret
END(cpu_lwkt_restore)