/*
 * Copyright (c) 2003,2004,2008 The DragonFly Project.  All rights reserved.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 */

//#include "use_npx.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/segments.h>

#include <machine/pmap.h>
#include <machine/lock.h>

#define CHECKNZ(expr, scratch_reg) \
        movq expr, scratch_reg; testq scratch_reg, scratch_reg; jnz 7f; int $3; 7:

#include "assym.s"

#if defined(SMP)
#define MPLOCKED        lock ;
#else
#define MPLOCKED
#endif

        .data

        .globl  panic

#if defined(SWTCH_OPTIM_STATS)
        .globl  swtch_optim_stats, tlb_flush_count
swtch_optim_stats:      .long   0       /* number of _swtch_optims */
tlb_flush_count:        .long   0
#endif

        .text


/*
 * cpu_heavy_switch(struct thread *next_thread)
 *
 * Switch from the current thread to a new thread.  This entry
 * is normally called via the thread->td_switch function, and will
 * only be called when the current thread is a heavy weight process.
 *
 * Some instructions have been reordered to reduce pipeline stalls.
 *
 * YYY disable interrupts once giant is removed.
 */
ENTRY(cpu_heavy_switch)
        /*
         * Save RIP, RSP and callee-saved registers (RBX, RBP, R12-R15).
         */
        movq    PCPU(curthread),%rcx
        /* On top of the stack is the return address. */
        movq    (%rsp),%rax                     /* (reorder optimization) */
        movq    TD_PCB(%rcx),%rdx               /* RDX = PCB */
        movq    %rax,PCB_RIP(%rdx)              /* return PC may be modified */
        movq    %rbx,PCB_RBX(%rdx)
        movq    %rsp,PCB_RSP(%rdx)
        movq    %rbp,PCB_RBP(%rdx)
        movq    %r12,PCB_R12(%rdx)
        movq    %r13,PCB_R13(%rdx)
        movq    %r14,PCB_R14(%rdx)
        movq    %r15,PCB_R15(%rdx)

        movq    %rcx,%rbx                       /* RBX = curthread */
        movq    TD_LWP(%rcx),%rcx
        movl    PCPU(cpuid), %eax
        movq    LWP_VMSPACE(%rcx), %rcx         /* RCX = vmspace */
        MPLOCKED btrl   %eax, VM_PMAP+PM_ACTIVE(%rcx)

        /*
         * Push the LWKT switch restore function, which resumes a heavy
         * weight process.  Note that the LWKT switcher is based on
         * TD_SP, while the heavy weight process switcher is based on
         * PCB_RSP.  TD_SP is usually two ints pushed relative to
         * PCB_RSP.  We push the flags for later restore by cpu_heavy_restore.
         */
        pushfq
        movq    $cpu_heavy_restore, %rax
        pushq   %rax
        movq    %rsp,TD_SP(%rbx)

        /*
         * Save debug regs if necessary
         */
        movq    PCB_FLAGS(%rdx),%rax
        andq    $PCB_DBREGS,%rax
        jz      1f                              /* no, skip over */
        movq    %dr7,%rax                       /* yes, do the save */
        movq    %rax,PCB_DR7(%rdx)
        /* JG correct value? */
        andq    $0x0000fc00, %rax               /* disable all watchpoints */
        movq    %rax,%dr7
        movq    %dr6,%rax
        movq    %rax,PCB_DR6(%rdx)
        movq    %dr3,%rax
        movq    %rax,PCB_DR3(%rdx)
        movq    %dr2,%rax
        movq    %rax,PCB_DR2(%rdx)
        movq    %dr1,%rax
        movq    %rax,PCB_DR1(%rdx)
        movq    %dr0,%rax
        movq    %rax,PCB_DR0(%rdx)
1:

#if 1
        /*
         * Save the FP state if we have used the FP.  Note that calling
         * npxsave will NULL out PCPU(npxthread).
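         *
         * Roughly, in C (a sketch only, assuming the historical
         * "void npxsave(union savefpu *)" prototype; not the literal
         * kernel code):
         *
         *      if (PCPU(npxthread) == curthread)
         *              npxsave(curthread->td_savefpu);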
         */
        cmpq    %rbx,PCPU(npxthread)
        jne     1f
        movq    %rdi,%r12               /* save %rdi.  %r12 is callee-saved */
        movq    TD_SAVEFPU(%rbx),%rdi
        call    npxsave                 /* do it in a big C function */
        movq    %r12,%rdi               /* restore %rdi */
1:
#endif

        /*
         * Switch to the next thread, which was passed as an argument
         * to cpu_heavy_switch().  The argument is in %rdi.
         * Set the current thread, load the stack pointer,
         * and 'ret' into the switch-restore function.
         *
         * The switch restore function expects the new thread to be in %rax
         * and the old one to be in %rbx.
         *
         * There is a one-instruction window where curthread is the new
         * thread but %rsp still points to the old thread's stack, but
         * we are protected by a critical section so it is ok.
         */
        movq    %rdi,%rax               /* RAX = newtd, RBX = oldtd */
        movq    %rax,PCPU(curthread)
        movq    TD_SP(%rax),%rsp
        CHECKNZ((%rsp), %r9)
        ret

/*
 * cpu_exit_switch(struct thread *next)
 *
 * The switch function is changed to this when a thread is going away
 * for good.  We have to ensure that the MMU state is not cached, and
 * we don't bother saving the existing thread state before switching.
 *
 * At this point we are in a critical section and this cpu owns the
 * thread's token, which serves as an interlock until the switchout is
 * complete.
 */
ENTRY(cpu_exit_switch)
        /*
         * Get us out of the vmspace
         */
#if 0
        movq    KPML4phys,%rcx
        movq    %cr3,%rax
        cmpq    %rcx,%rax
        je      1f
        /* JG no increment of statistics counters? see cpu_heavy_restore */
        movq    %rcx,%cr3
1:
#endif
        movq    PCPU(curthread),%rbx

        /*
         * If this is a process/lwp, deactivate the pmap after we've
         * switched it out.
         */
        movq    TD_LWP(%rbx),%rcx
        testq   %rcx,%rcx
        jz      2f
        movl    PCPU(cpuid), %eax
        movq    LWP_VMSPACE(%rcx), %rcx         /* RCX = vmspace */
        MPLOCKED btrl   %eax, VM_PMAP+PM_ACTIVE(%rcx)
2:
        /*
         * Switch to the next thread.  RET into the restore function, which
         * expects the new thread in RAX and the old in RBX.
         *
         * There is a one-instruction window where curthread is the new
         * thread but %rsp still points to the old thread's stack, but
         * we are protected by a critical section so it is ok.
         */
        movq    %rdi,%rax
        movq    %rax,PCPU(curthread)
        movq    TD_SP(%rax),%rsp
        CHECKNZ((%rsp), %r9)
        ret

/*
 * cpu_heavy_restore()  (current thread in %rax on entry)
 *
 * Restore the thread after an LWKT switch.  This entry is normally
 * called via the LWKT switch restore function, which was pulled
 * off the thread stack and jumped to.
 *
 * This entry is only called if the thread was previously saved
 * using cpu_heavy_switch() (the heavy weight process thread switcher),
 * or when a new process is initially scheduled.  The first thing we
 * do is clear the TDF_RUNNING bit in the old thread and set it in the
 * new thread.
 *
 * NOTE: The lwp may be in any state, not necessarily LSRUN, because
 * a preemption switch may interrupt the process and then return via
 * cpu_heavy_restore.
 *
 * YYY theoretically we do not have to restore everything here, a lot
 * of this junk can wait until we return to usermode.  But for now
 * we restore everything.
 *
 * YYY the PCB crap is really crap, it makes startup a bitch because
 * we can't switch away.
 *
 * YYY note: spl check is done in mi_switch when it splx()'s.
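 *
 * On entry TD_SP points at the two-slot frame built by cpu_heavy_switch():
 *
 *      TD_SP -> [ &cpu_heavy_restore ]  (consumed by the switcher's 'ret')
 *               [ saved rflags        ] (restored by the popfq below)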
 */

ENTRY(cpu_heavy_restore)
        popfq
        movq    TD_PCB(%rax),%rdx               /* RDX = PCB */
        movq    TD_LWP(%rax),%rcx

#if defined(SWTCH_OPTIM_STATS)
        incl    _swtch_optim_stats
#endif
        /*
         * Tell the pmap that our cpu is using the VMSPACE now.  We cannot
         * safely test/reload %cr3 until after we have set the bit in the
         * pmap (remember, we do not hold the MP lock in the switch code).
         */
        movq    LWP_VMSPACE(%rcx), %rcx         /* RCX = vmspace */
        movl    PCPU(cpuid), %esi
        MPLOCKED btsl   %esi, VM_PMAP+PM_ACTIVE(%rcx)

        /*
         * Restore the MMU address space.  If it is the same as the last
         * thread we don't have to invalidate the tlb (i.e. reload cr3).
         * YYY which naturally also means that the PM_ACTIVE bit had better
         * already have been set before we set it above, check? YYY
         */
#if 0
        movq    %cr3,%rsi
        movq    PCB_CR3(%rdx),%rcx
        cmpq    %rsi,%rcx
        je      4f
#if defined(SWTCH_OPTIM_STATS)
        decl    _swtch_optim_stats
        incl    _tlb_flush_count
#endif
        movq    %rcx,%cr3
4:
#endif
        /*
         * Clear TDF_RUNNING flag in old thread only after cleaning up
         * %cr3.  The target thread is already protected by being TDF_RUNQ
         * so setting TDF_RUNNING isn't as big a deal.
         */
        andl    $~TDF_RUNNING,TD_FLAGS(%rbx)
        orl     $TDF_RUNNING,TD_FLAGS(%rax)

#if 0
        /*
         * Deal with the PCB extension, restore the private tss
         */
        movq    PCB_EXT(%rdx),%rdi      /* check for a PCB extension */
        /* JG cheaper than "movq $1,%rbx", right? */
        /* JG what's that magic value $1? */
        movl    $1,%ebx                 /* maybe mark use of a private tss */
        testq   %rdi,%rdi
#if JG
        jnz     2f
#endif

        /* JG
         * Going back to the common_tss.  We may need to update TSS_ESP0
         * which sets the top of the supervisor stack when entering from
         * usermode.  The PCB is at the top of the stack but we need another
         * 16 bytes to take vm86 into account.
         */
        leaq    -16(%rdx),%rbx
        movq    %rbx, PCPU(common_tss) + TSS_RSP0
        movq    %rbx, PCPU(rsp0)

#if JG
        cmpl    $0,PCPU(private_tss)    /* don't have to reload if      */
        je      3f                      /* already using the common TSS */

        /* JG? */
        subl    %ebx,%ebx               /* unmark use of private tss */

        /*
         * Get the address of the common TSS descriptor for the ltr.
         * There is no way to get the address of a segment-accessed variable
         * so we store a self-referential pointer at the base of the per-cpu
         * data area and add the appropriate offset.
         */
        /* JG movl? */
        movq    $gd_common_tssd, %rdi
        /* JG name for "%gs:0"? */
        addq    %gs:0, %rdi

        /*
         * Move the correct TSS descriptor into the GDT slot, then reload
         * ltr.
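         *
         * (Loading TR with ltr requires a TSS descriptor whose busy bit
         * is clear; ltr itself sets the busy bit, which is why a fresh
         * descriptor is copied into the GDT slot rather than reloading
         * the old, now-busy one.)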
         */
2:
        /* JG */
        movl    %ebx,PCPU(private_tss)  /* mark/unmark private tss */
        movq    PCPU(tss_gdt), %rbx     /* entry in GDT */
        movq    0(%rdi), %rax
        movq    %rax, 0(%rbx)
        movl    $GPROC0_SEL*8, %esi     /* GSEL(entry, SEL_KPL) */
        ltr     %si
#endif

3:
#endif
#if 0
        /*
         * Restore the user %gs and %fs
         */
        movq    PCB_FSBASE(%rdx),%r9
        cmpq    PCPU(user_fs),%r9
        je      4f
        movq    %rdx,%r10
        movq    %r9,PCPU(user_fs)
        movl    $MSR_FSBASE,%ecx
        movl    PCB_FSBASE(%r10),%eax
        movl    PCB_FSBASE+4(%r10),%edx
        wrmsr
        movq    %r10,%rdx
4:
        movq    PCB_GSBASE(%rdx),%r9
        cmpq    PCPU(user_gs),%r9
        je      5f
        movq    %rdx,%r10
        movq    %r9,PCPU(user_gs)
        movl    $MSR_KGSBASE,%ecx       /* later swapgs moves it to GSBASE */
        movl    PCB_GSBASE(%r10),%eax
        movl    PCB_GSBASE+4(%r10),%edx
        wrmsr
        movq    %r10,%rdx
5:
#endif

        /*
         * Restore general registers.
         */
        movq    PCB_RBX(%rdx), %rbx
        movq    PCB_RSP(%rdx), %rsp
        movq    PCB_RBP(%rdx), %rbp
        movq    PCB_R12(%rdx), %r12
        movq    PCB_R13(%rdx), %r13
        movq    PCB_R14(%rdx), %r14
        movq    PCB_R15(%rdx), %r15
        movq    PCB_RIP(%rdx), %rax
        movq    %rax, (%rsp)

#if 0
        /*
         * Restore the user LDT if we have one
         */
        cmpl    $0, PCB_USERLDT(%edx)
        jnz     1f
        movl    _default_ldt,%eax
        cmpl    PCPU(currentldt),%eax
        je      2f
        lldt    _default_ldt
        movl    %eax,PCPU(currentldt)
        jmp     2f
1:      pushl   %edx
        call    set_user_ldt
        popl    %edx
2:
#endif
#if 0
        /*
         * Restore the user TLS if we have one
         */
        pushl   %edx
        call    set_user_TLS
        popl    %edx
#endif

        /*
         * Restore the DEBUG register state if necessary.
         */
        movq    PCB_FLAGS(%rdx),%rax
        andq    $PCB_DBREGS,%rax
        jz      1f                      /* no, skip over */
        movq    PCB_DR6(%rdx),%rax      /* yes, do the restore */
        movq    %rax,%dr6
        movq    PCB_DR3(%rdx),%rax
        movq    %rax,%dr3
        movq    PCB_DR2(%rdx),%rax
        movq    %rax,%dr2
        movq    PCB_DR1(%rdx),%rax
        movq    %rax,%dr1
        movq    PCB_DR0(%rdx),%rax
        movq    %rax,%dr0
        movq    %dr7,%rax               /* load dr7 so as not to disturb */
        /* JG correct value? */
        andq    $0x0000fc00,%rax        /* reserved bits */
        /* JG we've got more registers on x86_64 */
        pushq   %rbx
        movq    PCB_DR7(%rdx),%rbx
        /* JG correct value? */
        andq    $~0x0000fc00,%rbx
        orq     %rbx,%rax
        popq    %rbx
        movq    %rax,%dr7
1:

        CHECKNZ((%rsp), %r9)
        ret

/*
 * savectx(struct pcb *pcb)
 *
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
        /* fetch PCB */
        /* JG use %rdi instead of %rcx everywhere? */
        movq    %rdi,%rcx

        /* caller's return address - child won't execute this routine */
        movq    (%rsp),%rax
        movq    %rax,PCB_RIP(%rcx)
        movq    %rbx,PCB_RBX(%rcx)
        movq    %rsp,PCB_RSP(%rcx)
        movq    %rbp,PCB_RBP(%rcx)
        movq    %r12,PCB_R12(%rcx)
        movq    %r13,PCB_R13(%rcx)
        movq    %r14,PCB_R14(%rcx)
        movq    %r15,PCB_R15(%rcx)

#if 1
        /*
         * If npxthread == NULL, then the npx h/w state is irrelevant and the
         * state had better already be in the pcb.  This is true for forks
         * but not for dumps (the old book-keeping with FP flags in the pcb
         * always lost for dumps because the dump pcb has 0 flags).
         *
         * If npxthread != NULL, then we have to save the npx h/w state to
         * npxthread's pcb and copy it to the requested pcb, or save to the
         * requested pcb and reload.  Copying is easier because we would
         * have to handle h/w bugs for reloading.
         * We used to lose the parent's npx state for forks by forgetting
         * to reload.
         */
        movq    PCPU(npxthread),%rax
        testq   %rax,%rax
        jz      1f

        pushq   %rcx                    /* target pcb */
        movq    TD_SAVEFPU(%rax),%rax   /* originating savefpu area */
        pushq   %rax

        movq    %rax,%rdi
        call    npxsave

        popq    %rax
        popq    %rcx

        movq    $PCB_SAVEFPU_SIZE,%rdx
        leaq    PCB_SAVEFPU(%rcx),%rcx
        movq    %rcx,%rsi
        movq    %rax,%rdi
        call    bcopy
#endif

1:
        CHECKNZ((%rsp), %r9)
        ret

/*
 * cpu_idle_restore()   (current thread in %rax on entry) (one-time execution)
 *
 * Don't bother setting up any regs other than %rbp so backtraces
 * don't die.  This restore function is used to bootstrap into the
 * cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 * switching.
 *
 * Clear TDF_RUNNING in old thread only after we've cleaned up %cr3.
 *
 * If we are an AP we have to call ap_init() before jumping to
 * cpu_idle().  ap_init() will synchronize with the BP and finish
 * setting up various ncpu-dependent globaldata fields.  This may
 * happen on UP as well as SMP if we happen to be simulating multiple
 * cpus.
 */
ENTRY(cpu_idle_restore)
        /* cli */
        /* JG xor? */
        movl    $0,%ebp
        /* JG push RBP? */
        pushq   $0
        andl    $~TDF_RUNNING,TD_FLAGS(%rbx)
        orl     $TDF_RUNNING,TD_FLAGS(%rax)
#ifdef SMP
        cmpl    $0,PCPU(cpuid)
        je      1f
        call    ap_init
1:
#endif
        /* sti */
        jmp     cpu_idle

/*
 * cpu_kthread_restore()        (current thread in %rax on entry) (one-time execution)
 *
 * Don't bother setting up any regs other than %rbp so backtraces
 * don't die.  This restore function is used to bootstrap into an
 * LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 * after this.
 *
 * Since all of our context is on the stack we are reentrant and
 * we can release our critical section and enable interrupts early.
 */
ENTRY(cpu_kthread_restore)
        /*sti*/
        movq    TD_PCB(%rax),%rdx
        /* JG "movq $0, %rbp"? "xorq %rbp, %rbp"? */
        movl    $0,%ebp
        /* rax and rbx come from the switchout code */
        andl    $~TDF_RUNNING,TD_FLAGS(%rbx)
        orl     $TDF_RUNNING,TD_FLAGS(%rax)
        decl    TD_CRITCOUNT(%rax)
        movq    PCB_R12(%rdx),%rdi      /* argument to RBX function */
        movq    PCB_RBX(%rdx),%rax      /* thread function */
        /* note: top of stack return address inherited by function */
        CHECKNZ(%rax, %r9)
        jmp     *%rax

/*
 * cpu_lwkt_switch(struct thread *)
 *
 * Standard LWKT switching function.  Only non-scratch registers are
 * saved and we don't bother with the MMU state or anything else.
 *
 * This function is always called while in a critical section.
 *
 * There is a one-instruction window where curthread is the new
 * thread but %rsp still points to the old thread's stack, but
 * we are protected by a critical section so it is ok.
 *
 * YYY BGL, SPL
 */
ENTRY(cpu_lwkt_switch)
        pushq   %rbp    /* JG note: GDB hacked to locate ebp relative to td_sp */
        /* JG we've got more registers on x86_64 */
        pushq   %rbx
        movq    PCPU(curthread),%rbx
        pushq   %r12
        pushq   %r13
        pushq   %r14
        pushq   %r15
        pushfq

#if 1
        /*
         * Save the FP state if we have used the FP.  Note that calling
         * npxsave will NULL out PCPU(npxthread).
         *
         * We have to deal with the FP state for LWKT threads in case they
         * happen to get preempted or block while doing an optimized
         * bzero/bcopy/memcpy.
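         *
         * The frame built by the pushes above (%rbp, %rbx, %r12-%r15,
         * rflags, plus the restore function pushed below) is unwound in
         * the reverse order by cpu_lwkt_restore.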
         */
        cmpq    %rbx,PCPU(npxthread)
        jne     1f
        movq    %rdi,%r12               /* save %rdi.  %r12 is callee-saved */
        movq    TD_SAVEFPU(%rbx),%rdi
        call    npxsave                 /* do it in a big C function */
        movq    %r12,%rdi               /* restore %rdi */
1:
#endif

        movq    %rdi,%rax               /* switch to this thread */
        pushq   $cpu_lwkt_restore
        movq    %rsp,TD_SP(%rbx)
        movq    %rax,PCPU(curthread)
        movq    TD_SP(%rax),%rsp

        /*
         * %rax contains new thread, %rbx contains old thread.
         */
        CHECKNZ((%rsp), %r9)
        ret

/*
 * cpu_lwkt_restore()   (current thread in %rax on entry)
 *
 * Standard LWKT restore function.  This function is always called
 * while in a critical section.
 *
 * Warning: due to preemption the restore function can be used to
 * 'return' to the original thread.  Interrupt disablement must be
 * protected through the switch so we cannot run splz here.
 */
ENTRY(cpu_lwkt_restore)
        andl    $~TDF_RUNNING,TD_FLAGS(%rbx)
        orl     $TDF_RUNNING,TD_FLAGS(%rax)
        popfq
        popq    %r15
        popq    %r14
        popq    %r13
        popq    %r12
        popq    %rbx
        popq    %rbp
        ret

/*
 * bootstrap_idle()
 *
 * Make AP become the idle loop.
 */
ENTRY(bootstrap_idle)
        movq    PCPU(curthread),%rax
        movq    %rax,%rbx
        movq    TD_SP(%rax),%rsp
        ret
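
/*
 * (The 'ret' above resumes whichever restore function was left on the
 * idle thread's stack; for a freshly bootstrapped AP that is presumably
 * cpu_idle_restore above.)
 */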