/*
 * Copyright (c) 2003,2004,2008 The DragonFly Project.  All rights reserved.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 */
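
/*
 * Overview: this file contains the low level context switch code for
 * x86_64 - the heavy-weight (process) switch/restore pair, the
 * light-weight LWKT switch/restore pair, and the one-shot bootstrap
 * restore functions for the idle and kernel threads.
 */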

//#include "use_npx.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/segments.h>

#include <machine/pmap.h>
#include <machine/lock.h>

/*
 * CHECKNZ(expr, scratch_reg) - sanity check that expr is non-zero,
 * breaking into the debugger (int $3) if it is not.  Used to verify
 * stack pointers and jump targets before we 'ret' or 'jmp' through them.
 */
#define CHECKNZ(expr, scratch_reg) \
	movq expr, scratch_reg; testq scratch_reg, scratch_reg; jnz 7f; int $3; 7:

#include "assym.s"

/* lock prefix for atomic read-modify-write operations */
#define MPLOCKED	lock ;

	.data

	.globl	panic
	.globl	lwkt_switch_return

#if defined(SWTCH_OPTIM_STATS)
	.globl	swtch_optim_stats, tlb_flush_count
swtch_optim_stats:	.long	0	/* number of _swtch_optims */
tlb_flush_count:	.long	0
#endif

	.text


/*
 * cpu_heavy_switch(struct thread *next_thread)
 *
 *	Switch from the current thread to a new thread.  This entry
 *	is normally called via the thread->td_switch function, and will
 *	only be called when the current thread is a heavy weight process.
 *
 *	Some instructions have been reordered to reduce pipeline stalls.
 *
 *	YYY disable interrupts once giant is removed.
 */
ENTRY(cpu_heavy_switch)
	/*
	 * Save RIP, RSP and callee-saved registers (RBX, RBP, R12-R15).
	 */
	movq	PCPU(curthread),%rcx
	/* On top of the stack is the return address. */
	movq	(%rsp),%rax			/* (reorder optimization) */
	movq	TD_PCB(%rcx),%rdx		/* RDX = PCB */
	movq	%rax,PCB_RIP(%rdx)		/* return PC may be modified */
	movq	%rbx,PCB_RBX(%rdx)
	movq	%rsp,PCB_RSP(%rdx)
	movq	%rbp,PCB_RBP(%rdx)
	movq	%r12,PCB_R12(%rdx)
	movq	%r13,PCB_R13(%rdx)
	movq	%r14,PCB_R14(%rdx)
	movq	%r15,PCB_R15(%rdx)

	movq	%rcx,%rbx			/* RBX = curthread */
	movq	TD_LWP(%rcx),%rcx
	movslq	PCPU(cpuid), %rax
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = vmspace */
	MPLOCKED btrq	%rax, VM_PMAP+PM_ACTIVE(%rcx)
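
	/*
	 * Clearing our cpu's bit in the pmap's PM_ACTIVE mask (the
	 * btrq above) tells the pmap code that this cpu no longer has
	 * the old vmspace loaded, so it can be skipped when TLB
	 * invalidations are broadcast.  The bit operation must be
	 * atomic (MPLOCKED) because other cpus manipulate the same
	 * mask concurrently.
	 */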

	/*
	 * Push the LWKT switch restore function, which resumes a heavy
	 * weight process.  Note that the LWKT switcher is based on
	 * TD_SP, while the heavy weight process switcher is based on
	 * PCB_RSP.  TD_SP is usually two ints pushed relative to
	 * PCB_RSP.  We push the flags for later restore by cpu_heavy_restore.
	 */
	pushfq
	movq	$cpu_heavy_restore, %rax
	pushq	%rax
	movq	%rsp,TD_SP(%rbx)

	/*
	 * Save debug regs if necessary
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	%dr7,%rax			/* yes, do the save */
	movq	%rax,PCB_DR7(%rdx)
	/* JG correct value? */
	andq	$0x0000fc00, %rax		/* disable all watchpoints */
	movq	%rax,%dr7
	movq	%dr6,%rax
	movq	%rax,PCB_DR6(%rdx)
	movq	%dr3,%rax
	movq	%rax,PCB_DR3(%rdx)
	movq	%dr2,%rax
	movq	%rax,PCB_DR2(%rdx)
	movq	%dr1,%rax
	movq	%rax,PCB_DR1(%rdx)
	movq	%dr0,%rax
	movq	%rax,PCB_DR0(%rdx)
1:

#if 1
	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 */
	cmpq	%rbx,PCPU(npxthread)
	jne	1f
	movq	%rdi,%r12		/* save %rdi.  %r12 is callee-saved */
	movq	TD_SAVEFPU(%rbx),%rdi
	call	npxsave			/* do it in a big C function */
	movq	%r12,%rdi		/* restore %rdi */
1:
#endif

	/*
	 * Switch to the next thread, which was passed as an argument
	 * to cpu_heavy_switch().  The argument is in %rdi.
	 * Set the current thread, load the stack pointer,
	 * and 'ret' into the switch-restore function.
	 *
	 * The switch restore function expects the new thread to be in %rax
	 * and the old one to be in %rbx.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	movq	%rdi,%rax		/* RAX = newtd, RBX = oldtd */
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	CHECKNZ((%rsp), %r9)
	ret

/*
 * cpu_exit_switch(struct thread *next)
 *
 *	The switch function is changed to this when a thread is going away
 *	for good.  We have to ensure that the MMU state is not cached, and
 *	we don't bother saving the existing thread state before switching.
 *
 *	At this point we are in a critical section and this cpu owns the
 *	thread's token, which serves as an interlock until the switchout is
 *	complete.
 */
ENTRY(cpu_exit_switch)
	/*
	 * Get us out of the vmspace
	 */
#if 0
	movq	KPML4phys,%rcx
	movq	%cr3,%rax
	cmpq	%rcx,%rax
	je	1f
	/* JG no increment of statistics counters? see cpu_heavy_restore */
	movq	%rcx,%cr3
1:
#endif
	movq	PCPU(curthread),%rbx

	/*
	 * If this is a process/lwp, deactivate the pmap after we've
	 * switched it out.
	 */
	movq	TD_LWP(%rbx),%rcx
	testq	%rcx,%rcx
	jz	2f
	movslq	PCPU(cpuid), %rax
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = vmspace */
	MPLOCKED btrq	%rax, VM_PMAP+PM_ACTIVE(%rcx)
2:
	/*
	 * Switch to the next thread.  RET into the restore function, which
	 * expects the new thread in RAX and the old in RBX.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	movq	%rdi,%rax
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	CHECKNZ((%rsp), %r9)
	ret
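
/*
 * Stack frame left behind by cpu_heavy_switch, for reference (derived
 * from the pushes above; TD_SP points at the top):
 *
 *	TD_SP   ->	&cpu_heavy_restore	popped into %rip by 'ret'
 *			saved rflags		popfq'd by cpu_heavy_restore
 *	PCB_RSP ->	caller's return address	(value also saved in PCB_RIP)
 *
 * This is why TD_SP is described as "two ints pushed relative to
 * PCB_RSP" (two quadwords on x86_64).
 */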

/*
 * cpu_heavy_restore()	(current thread in %rax on entry)
 *
 *	Restore the thread after an LWKT switch.  This entry is normally
 *	called via the LWKT switch restore function, which was pulled
 *	off the thread stack and jumped to.
 *
 *	This entry is only called if the thread was previously saved
 *	using cpu_heavy_switch() (the heavy weight process thread switcher),
 *	or when a new process is initially scheduled.
 *
 *	NOTE: The lwp may be in any state, not necessarily LSRUN, because
 *	a preemption switch may interrupt the process and then return via
 *	cpu_heavy_restore.
 *
 *	YYY theoretically we do not have to restore everything here, a lot
 *	of this junk can wait until we return to usermode.  But for now
 *	we restore everything.
 *
 *	YYY the PCB crap is really crap, it makes startup a bitch because
 *	we can't switch away.
 *
 *	YYY note: spl check is done in mi_switch when it splx()'s.
 */

ENTRY(cpu_heavy_restore)
	popfq
	movq	TD_PCB(%rax),%rdx		/* RDX = PCB */
	movq	TD_LWP(%rax),%rcx

#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif
	/*
	 * Tell the pmap that our cpu is using the VMSPACE now.  We cannot
	 * safely test/reload %cr3 until after we have set the bit in the
	 * pmap (remember, we do not hold the MP lock in the switch code).
	 */
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = vmspace */
	movslq	PCPU(cpuid), %rsi
	MPLOCKED btsq	%rsi, VM_PMAP+PM_ACTIVE(%rcx)

	/*
	 * Restore the MMU address space.  If it is the same as the last
	 * thread we don't have to invalidate the tlb (i.e. reload cr3).
	 * YYY which naturally also means that the PM_ACTIVE bit had better
	 * already have been set before we set it above, check? YYY
	 */
#if 0
	movq	%cr3,%rsi
	movq	PCB_CR3(%rdx),%rcx
	cmpq	%rsi,%rcx
	je	4f
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
	movq	%rcx,%cr3
4:
#endif
	/*
	 * NOTE: %rbx is the previous thread and %rax is the new thread.
	 * %rbx is retained throughout so we can return it.
	 *
	 * lwkt_switch[_return] is responsible for handling TDF_RUNNING.
	 */

#if 0
	/*
	 * Deal with the PCB extension, restore the private tss
	 */
	movq	PCB_EXT(%rdx),%rdi	/* check for a PCB extension */
	movq	$1,%rcx			/* maybe mark use of a private tss */
	testq	%rdi,%rdi
#if JG
	jnz	2f
#endif

	/* JG
	 * Going back to the common_tss.  We may need to update TSS_ESP0
	 * which sets the top of the supervisor stack when entering from
	 * usermode.  The PCB is at the top of the stack but we need another
	 * 16 bytes to take vm86 into account.
	 */
	leaq	-16(%rdx),%rcx
	movq	%rcx, PCPU(common_tss) + TSS_RSP0
	movq	%rcx, PCPU(rsp0)

#if JG
	cmpl	$0,PCPU(private_tss)	/* don't have to reload if      */
	je	3f			/* already using the common TSS */

	/* JG? */
	subq	%rcx,%rcx		/* unmark use of private tss */

	/*
	 * Get the address of the common TSS descriptor for the ltr.
	 * There is no way to get the address of a segment-accessed variable
	 * so we store a self-referential pointer at the base of the per-cpu
	 * data area and add the appropriate offset.
	 */
	/* JG movl? */
	movq	$gd_common_tssd, %rdi
	/* JG name for "%gs:0"? */
	addq	%gs:0, %rdi

	/*
	 * Move the correct TSS descriptor into the GDT slot, then reload
	 * ltr.
	 */
2:
	/* JG */
	movl	%rcx,PCPU(private_tss)		/* mark/unmark private tss */
	movq	PCPU(tss_gdt), %rcx		/* entry in GDT */
	movq	0(%rdi), %rax
	movq	%rax, 0(%rcx)
	movl	$GPROC0_SEL*8, %esi		/* GSEL(entry, SEL_KPL) */
	ltr	%si
#endif

3:
#endif
#if 0
	/*
	 * Restore the user %gs and %fs
	 */
	movq	PCB_FSBASE(%rdx),%r9
	cmpq	PCPU(user_fs),%r9
	je	4f
	movq	%rdx,%r10
	movq	%r9,PCPU(user_fs)
	movl	$MSR_FSBASE,%ecx
	movl	PCB_FSBASE(%r10),%eax
	movl	PCB_FSBASE+4(%r10),%edx
	wrmsr
	movq	%r10,%rdx
4:
	movq	PCB_GSBASE(%rdx),%r9
	cmpq	PCPU(user_gs),%r9
	je	5f
	movq	%rdx,%r10
	movq	%r9,PCPU(user_gs)
	movl	$MSR_KGSBASE,%ecx	/* later swapgs moves it to GSBASE */
	movl	PCB_GSBASE(%r10),%eax
	movl	PCB_GSBASE+4(%r10),%edx
	wrmsr
	movq	%r10,%rdx
5:
#endif

	/*
	 * Restore general registers.  %rbx is restored later.
	 */
	movq	PCB_RSP(%rdx), %rsp
	movq	PCB_RBP(%rdx), %rbp
	movq	PCB_R12(%rdx), %r12
	movq	PCB_R13(%rdx), %r13
	movq	PCB_R14(%rdx), %r14
	movq	PCB_R15(%rdx), %r15
	movq	PCB_RIP(%rdx), %rax
	movq	%rax, (%rsp)
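
	/*
	 * Note that PCB_RIP was just written over the return address
	 * slot of the restored stack, so the terminating 'ret' below
	 * resumes at the saved PC instead of returning to whoever
	 * called the restore function.
	 */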

#if 0
	/*
	 * Restore the user LDT if we have one
	 */
	cmpl	$0, PCB_USERLDT(%edx)
	jnz	1f
	movl	_default_ldt,%eax
	cmpl	PCPU(currentldt),%eax
	je	2f
	lldt	_default_ldt
	movl	%eax,PCPU(currentldt)
	jmp	2f
1:	pushl	%edx
	call	set_user_ldt
	popl	%edx
2:
#endif
#if 0
	/*
	 * Restore the user TLS if we have one
	 */
	pushl	%edx
	call	set_user_TLS
	popl	%edx
#endif

	/*
	 * Restore the DEBUG register state if necessary.
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	PCB_DR6(%rdx),%rax		/* yes, do the restore */
	movq	%rax,%dr6
	movq	PCB_DR3(%rdx),%rax
	movq	%rax,%dr3
	movq	PCB_DR2(%rdx),%rax
	movq	%rax,%dr2
	movq	PCB_DR1(%rdx),%rax
	movq	%rax,%dr1
	movq	PCB_DR0(%rdx),%rax
	movq	%rax,%dr0
	movq	%dr7,%rax		/* load dr7 so as not to disturb */
	/* JG correct value? */
	andq	$0x0000fc00,%rax	/* reserved bits */
	/* JG we've got more registers on x86_64 */
	movq	PCB_DR7(%rdx),%rcx
	/* JG correct value? */
	andq	$~0x0000fc00,%rcx
	orq	%rcx,%rax
	movq	%rax,%dr7
1:
	movq	%rbx,%rax
	movq	PCB_RBX(%rdx),%rbx

	CHECKNZ((%rsp), %r9)
	ret

/*
 * savectx(struct pcb *pcb)
 *
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	/* fetch PCB */
	/* JG use %rdi instead of %rcx everywhere? */
	movq	%rdi,%rcx

	/* caller's return address - child won't execute this routine */
	movq	(%rsp),%rax
	movq	%rax,PCB_RIP(%rcx)

	movq	%rbx,PCB_RBX(%rcx)
	movq	%rsp,PCB_RSP(%rcx)
	movq	%rbp,PCB_RBP(%rcx)
	movq	%r12,PCB_R12(%rcx)
	movq	%r13,PCB_R13(%rcx)
	movq	%r14,PCB_R14(%rcx)
	movq	%r15,PCB_R15(%rcx)

#if 1
	/*
	 * If npxthread == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxthread != NULL, then we have to save the npx h/w state to
	 * npxthread's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	movq	PCPU(npxthread),%rax
	testq	%rax,%rax
	jz	1f

	pushq	%rcx			/* target pcb */
	movq	TD_SAVEFPU(%rax),%rax	/* originating savefpu area */
	pushq	%rax

	movq	%rax,%rdi
	call	npxsave

	popq	%rax
	popq	%rcx

	movq	$PCB_SAVEFPU_SIZE,%rdx
	leaq	PCB_SAVEFPU(%rcx),%rcx
	movq	%rcx,%rsi
	movq	%rax,%rdi
	call	bcopy
#endif

1:
	CHECKNZ((%rsp), %r9)
	ret
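
/*
 * Unlike the switch functions above, savectx() only snapshots the
 * current cpu state into the supplied pcb (plus any live FP state)
 * and then returns to its caller; no switch takes place.
 */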

/*
 * cpu_idle_restore()	(current thread in %rax on entry) (one-time execution)
 *
 *	Don't bother setting up any regs other than %rbp so backtraces
 *	don't die.  This restore function is used to bootstrap into the
 *	cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 *	switching.
 *
 *	Clear TDF_RUNNING in old thread only after we've cleaned up %cr3.
 *	This only occurs during system boot so no special handling is
 *	required for migration.
 *
 *	If we are an AP we have to call ap_init() before jumping to
 *	cpu_idle().  ap_init() will synchronize with the BP and finish
 *	setting up various ncpu-dependent globaldata fields.  This may
 *	happen on UP as well as SMP if we happen to be simulating multiple
 *	cpus.
 */
ENTRY(cpu_idle_restore)
	/* cli */
	/* JG xor? */
	movl	$0,%ebp
	/* JG push RBP? */
	pushq	$0
	andl	$~TDF_RUNNING,TD_FLAGS(%rbx)
	orl	$TDF_RUNNING,TD_FLAGS(%rax)	/* manual, no switch_return */
	cmpl	$0,PCPU(cpuid)
	je	1f
	call	ap_init
1:
	/* sti */
	jmp	cpu_idle

/*
 * cpu_kthread_restore()	(current thread in %rax on entry) (one-time execution)
 *
 *	Don't bother setting up any regs other than %rbp so backtraces
 *	don't die.  This restore function is used to bootstrap into an
 *	LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 *	after this.
 *
 *	Because this switch target does not 'return' to lwkt_switch()
 *	we have to call lwkt_switch_return(otd) to clean up otd.
 *	otd is in %rbx.
 *
 *	Since all of our context is on the stack we are reentrant and
 *	we can release our critical section and enable interrupts early.
 */
ENTRY(cpu_kthread_restore)
	/*sti*/
	movq	TD_PCB(%rax),%r13
	movq	$0,%rbp

	/*
	 * rax and rbx come from the switchout code.  Call
	 * lwkt_switch_return(otd).
	 *
	 * NOTE: unlike i386, %rsi and %rdi are not call-saved regs.
	 */
	pushq	%rax
	movq	%rbx,%rdi
	call	lwkt_switch_return
	popq	%rax
	decl	TD_CRITCOUNT(%rax)
	movq	PCB_R12(%r13),%rdi	/* argument to RBX function */
	movq	PCB_RBX(%r13),%rax	/* thread function */
	/* note: top of stack return address inherited by function */
	CHECKNZ(%rax, %r9)
	jmp	*%rax

/*
 * cpu_lwkt_switch(struct thread *)
 *
 *	Standard LWKT switching function.  Only non-scratch registers are
 *	saved and we don't bother with the MMU state or anything else.
 *
 *	This function is always called while in a critical section.
 *
 *	There is a one-instruction window where curthread is the new
 *	thread but %rsp still points to the old thread's stack, but
 *	we are protected by a critical section so it is ok.
 *
 *	YYY BGL, SPL
 */
ENTRY(cpu_lwkt_switch)
	pushq	%rbp	/* JG note: GDB hacked to locate ebp relative to td_sp */
	/* JG we've got more registers on x86_64 */
	pushq	%rbx
	movq	PCPU(curthread),%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15
	pushfq

#if 1
	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 *
	 * We have to deal with the FP state for LWKT threads in case they
	 * happen to get preempted or block while doing an optimized
	 * bzero/bcopy/memcpy.
	 */
	cmpq	%rbx,PCPU(npxthread)
	jne	1f
	movq	%rdi,%r12		/* save %rdi.  %r12 is callee-saved */
	movq	TD_SAVEFPU(%rbx),%rdi
	call	npxsave			/* do it in a big C function */
	movq	%r12,%rdi		/* restore %rdi */
1:
#endif

	movq	%rdi,%rax		/* switch to this thread */
	pushq	$cpu_lwkt_restore
	movq	%rsp,TD_SP(%rbx)
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp

	/*
	 * %rax contains new thread, %rbx contains old thread.
	 */
	CHECKNZ((%rsp), %r9)
	ret
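
/*
 * LWKT switch frame, for reference (derived from the pushes in
 * cpu_lwkt_switch above; TD_SP points at the top):
 *
 *	TD_SP ->	&cpu_lwkt_restore	popped into %rip by 'ret'
 *			rflags
 *			%r15, %r14, %r13, %r12
 *			%rbx
 *			%rbp
 *			caller's return address
 *
 * cpu_lwkt_restore below unwinds this frame in the reverse order.
 */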

/*
 * cpu_lwkt_restore()	(current thread in %rax on entry)
 *
 *	Standard LWKT restore function.  This function is always called
 *	while in a critical section.
 *
 *	Warning: due to preemption the restore function can be used to
 *	'return' to the original thread.  Interrupt disablement must be
 *	protected through the switch so we cannot run splz here.
 */
ENTRY(cpu_lwkt_restore)
	/*
	 * NOTE: %rbx is the previous thread and %rax is the new thread.
	 * %rbx is retained throughout so we can return it.
	 *
	 * lwkt_switch[_return] is responsible for handling TDF_RUNNING.
	 */
	movq	%rbx,%rax
	popfq
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp
	ret

/*
 * bootstrap_idle()
 *
 *	Make AP become the idle loop.
 */
ENTRY(bootstrap_idle)
	movq	PCPU(curthread),%rax
	movq	%rax,%rbx
	movq	TD_SP(%rax),%rsp
	ret