1/* 2 * Copyright (c) 2003,2004,2008 The DragonFly Project. All rights reserved. 3 * Copyright (c) 2008 Jordan Gordeev. 4 * 5 * This code is derived from software contributed to The DragonFly Project 6 * by Matthew Dillon <dillon@backplane.com> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 * 35 * Copyright (c) 1990 The Regents of the University of California. 36 * All rights reserved. 37 * 38 * This code is derived from software contributed to Berkeley by 39 * William Jolitz. 40 * 41 * Redistribution and use in source and binary forms, with or without 42 * modification, are permitted provided that the following conditions 43 * are met: 44 * 1. Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * 2. Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in the 48 * documentation and/or other materials provided with the distribution. 49 * 3. All advertising materials mentioning features or use of this software 50 * must display the following acknowledgement: 51 * This product includes software developed by the University of 52 * California, Berkeley and its contributors. 53 * 4. Neither the name of the University nor the names of its contributors 54 * may be used to endorse or promote products derived from this software 55 * without specific prior written permission. 56 * 57 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 58 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 59 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 60 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 61 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 62 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 63 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 64 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 65 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 66 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 67 * SUCH DAMAGE. 68 * 69 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $ 70 */ 71 72//#include "use_npx.h" 73 74#include <sys/rtprio.h> 75 76#include <machine/asmacros.h> 77#include <machine/segments.h> 78 79#include <machine/pmap.h> 80#if JG 81#include <machine_base/apic/apicreg.h> 82#endif 83#include <machine/lock.h> 84 85#define CHECKNZ(expr, scratch_reg) \ 86 movq expr, scratch_reg; testq scratch_reg, scratch_reg; jnz 7f; int $3; 7: 87 88#include "assym.s" 89 90#if defined(SMP) 91#define MPLOCKED lock ; 92#else 93#define MPLOCKED 94#endif 95 96 .data 97 98 .globl panic 99 100#if defined(SWTCH_OPTIM_STATS) 101 .globl swtch_optim_stats, tlb_flush_count 102swtch_optim_stats: .long 0 /* number of _swtch_optims */ 103tlb_flush_count: .long 0 104#endif 105 106 .text 107 108 109/* 110 * cpu_heavy_switch(struct thread *next_thread) 111 * 112 * Switch from the current thread to a new thread. This entry 113 * is normally called via the thread->td_switch function, and will 114 * only be called when the current thread is a heavy weight process. 115 * 116 * Some instructions have been reordered to reduce pipeline stalls. 117 * 118 * YYY disable interrupts once giant is removed. 119 */ 120ENTRY(cpu_heavy_switch) 121 /* 122 * Save RIP, RSP and callee-saved registers (RBX, RBP, R12-R15). 123 */ 124 movq PCPU(curthread),%rcx 125 /* On top of the stack is the return adress. */ 126 movq (%rsp),%rax /* (reorder optimization) */ 127 movq TD_PCB(%rcx),%rdx /* RDX = PCB */ 128 movq %rax,PCB_RIP(%rdx) /* return PC may be modified */ 129 movq %rbx,PCB_RBX(%rdx) 130 movq %rsp,PCB_RSP(%rdx) 131 movq %rbp,PCB_RBP(%rdx) 132 movq %r12,PCB_R12(%rdx) 133 movq %r13,PCB_R13(%rdx) 134 movq %r14,PCB_R14(%rdx) 135 movq %r15,PCB_R15(%rdx) 136 137 movq %rcx,%rbx /* RBX = curthread */ 138 movq TD_LWP(%rcx),%rcx 139 movl PCPU(cpuid), %eax 140 movq LWP_VMSPACE(%rcx), %rcx /* RCX = vmspace */ 141 MPLOCKED btrl %eax, VM_PMAP+PM_ACTIVE(%rcx) 142 143 /* 144 * Push the LWKT switch restore function, which resumes a heavy 145 * weight process. Note that the LWKT switcher is based on 146 * TD_SP, while the heavy weight process switcher is based on 147 * PCB_RSP. TD_SP is usually two ints pushed relative to 148 * PCB_RSP. We push the flags for later restore by cpu_heavy_restore. 149 */ 150 pushfq 151 movq $cpu_heavy_restore, %rax 152 pushq %rax 153 movq %rsp,TD_SP(%rbx) 154 155 /* 156 * Save debug regs if necessary 157 */ 158 movq PCB_FLAGS(%rdx),%rax 159 andq $PCB_DBREGS,%rax 160 jz 1f /* no, skip over */ 161 movq %dr7,%rax /* yes, do the save */ 162 movq %rax,PCB_DR7(%rdx) 163 /* JG correct value? */ 164 andq $0x0000fc00, %rax /* disable all watchpoints */ 165 movq %rax,%dr7 166 movq %dr6,%rax 167 movq %rax,PCB_DR6(%rdx) 168 movq %dr3,%rax 169 movq %rax,PCB_DR3(%rdx) 170 movq %dr2,%rax 171 movq %rax,PCB_DR2(%rdx) 172 movq %dr1,%rax 173 movq %rax,PCB_DR1(%rdx) 174 movq %dr0,%rax 175 movq %rax,PCB_DR0(%rdx) 1761: 177 178#if 1 179 /* 180 * Save the FP state if we have used the FP. Note that calling 181 * npxsave will NULL out PCPU(npxthread). 182 */ 183 cmpq %rbx,PCPU(npxthread) 184 jne 1f 185 movq %rdi,%r12 /* save %rdi. %r12 is callee-saved */ 186 movq TD_SAVEFPU(%rbx),%rdi 187 call npxsave /* do it in a big C function */ 188 movq %r12,%rdi /* restore %rdi */ 1891: 190#endif 191 192 /* 193 * Switch to the next thread, which was passed as an argument 194 * to cpu_heavy_switch(). The argument is in %rdi. 195 * Set the current thread, load the stack pointer, 196 * and 'ret' into the switch-restore function. 197 * 198 * The switch restore function expects the new thread to be in %rax 199 * and the old one to be in %rbx. 200 * 201 * There is a one-instruction window where curthread is the new 202 * thread but %rsp still points to the old thread's stack, but 203 * we are protected by a critical section so it is ok. 204 */ 205 movq %rdi,%rax /* RAX = newtd, RBX = oldtd */ 206 movq %rax,PCPU(curthread) 207 movq TD_SP(%rax),%rsp 208 CHECKNZ((%rsp), %r9) 209 ret 210 211/* 212 * cpu_exit_switch(struct thread *next) 213 * 214 * The switch function is changed to this when a thread is going away 215 * for good. We have to ensure that the MMU state is not cached, and 216 * we don't bother saving the existing thread state before switching. 217 * 218 * At this point we are in a critical section and this cpu owns the 219 * thread's token, which serves as an interlock until the switchout is 220 * complete. 221 */ 222ENTRY(cpu_exit_switch) 223 /* 224 * Get us out of the vmspace 225 */ 226 movq KPML4phys,%rcx 227 movq %cr3,%rax 228 cmpq %rcx,%rax 229 je 1f 230 /* JG no increment of statistics counters? see cpu_heavy_restore */ 231 movq %rcx,%cr3 2321: 233 movq PCPU(curthread),%rbx 234 235 /* 236 * If this is a process/lwp, deactivate the pmap after we've 237 * switched it out. 238 */ 239 movq TD_LWP(%rbx),%rcx 240 testq %rcx,%rcx 241 jz 2f 242 movl PCPU(cpuid), %eax 243 movq LWP_VMSPACE(%rcx), %rcx /* RCX = vmspace */ 244 MPLOCKED btrl %eax, VM_PMAP+PM_ACTIVE(%rcx) 2452: 246 /* 247 * Switch to the next thread. RET into the restore function, which 248 * expects the new thread in RAX and the old in RBX. 249 * 250 * There is a one-instruction window where curthread is the new 251 * thread but %rsp still points to the old thread's stack, but 252 * we are protected by a critical section so it is ok. 253 */ 254 movq %rdi,%rax 255 movq %rax,PCPU(curthread) 256 movq TD_SP(%rax),%rsp 257 CHECKNZ((%rsp), %r9) 258 ret 259 260/* 261 * cpu_heavy_restore() (current thread in %rax on entry) 262 * 263 * Restore the thread after an LWKT switch. This entry is normally 264 * called via the LWKT switch restore function, which was pulled 265 * off the thread stack and jumped to. 266 * 267 * This entry is only called if the thread was previously saved 268 * using cpu_heavy_switch() (the heavy weight process thread switcher), 269 * or when a new process is initially scheduled. The first thing we 270 * do is clear the TDF_RUNNING bit in the old thread and set it in the 271 * new thread. 272 * 273 * NOTE: The lwp may be in any state, not necessarily LSRUN, because 274 * a preemption switch may interrupt the process and then return via 275 * cpu_heavy_restore. 276 * 277 * YYY theoretically we do not have to restore everything here, a lot 278 * of this junk can wait until we return to usermode. But for now 279 * we restore everything. 280 * 281 * YYY the PCB crap is really crap, it makes startup a bitch because 282 * we can't switch away. 283 * 284 * YYY note: spl check is done in mi_switch when it splx()'s. 285 */ 286 287ENTRY(cpu_heavy_restore) 288 popfq 289 movq TD_LWP(%rax),%rcx 290 291#if defined(SWTCH_OPTIM_STATS) 292 incl _swtch_optim_stats 293#endif 294 /* 295 * Tell the pmap that our cpu is using the VMSPACE now. We cannot 296 * safely test/reload %cr3 until after we have set the bit in the 297 * pmap (remember, we do not hold the MP lock in the switch code). 298 */ 299 movq LWP_VMSPACE(%rcx), %rcx /* RCX = vmspace */ 300 movl PCPU(cpumask), %esi 301 MPLOCKED orl %esi, VM_PMAP+PM_ACTIVE(%rcx) 302#ifdef SMP 303 testl $CPUMASK_LOCK,VM_PMAP+PM_ACTIVE(%rcx) 304 jz 1f 305 pushq %rax 306 movq %rcx,%rdi 307 call pmap_interlock_wait /* pmap_interlock_wait(vm) */ 308 popq %rax 3091: 310#endif 311 312 /* 313 * Restore the MMU address space. If it is the same as the last 314 * thread we don't have to invalidate the tlb (i.e. reload cr3). 315 * YYY which naturally also means that the PM_ACTIVE bit had better 316 * already have been set before we set it above, check? YYY 317 */ 318 movq TD_PCB(%rax),%rdx /* RDX = PCB */ 319 movq %cr3,%rsi 320 movq PCB_CR3(%rdx),%rcx 321 cmpq %rsi,%rcx 322 je 4f 323#if defined(SWTCH_OPTIM_STATS) 324 decl _swtch_optim_stats 325 incl _tlb_flush_count 326#endif 327 movq %rcx,%cr3 3284: 329 /* 330 * Clear TDF_RUNNING flag in old thread only after cleaning up 331 * %cr3. The target thread is already protected by being TDF_RUNQ 332 * so setting TDF_RUNNING isn't as big a deal. 333 */ 334 andl $~TDF_RUNNING,TD_FLAGS(%rbx) 335 orl $TDF_RUNNING,TD_FLAGS(%rax) 336 337 /* 338 * Deal with the PCB extension, restore the private tss 339 */ 340 movq PCB_EXT(%rdx),%rdi /* check for a PCB extension */ 341 /* JG cheaper than "movq $1,%rbx", right? */ 342 /* JG what's that magic value $1? */ 343 movl $1,%ebx /* maybe mark use of a private tss */ 344 testq %rdi,%rdi 345#if JG 346 jnz 2f 347#endif 348 349 /* JG 350 * Going back to the common_tss. We may need to update TSS_ESP0 351 * which sets the top of the supervisor stack when entering from 352 * usermode. The PCB is at the top of the stack but we need another 353 * 16 bytes to take vm86 into account. 354 */ 355 leaq -16(%rdx),%rbx 356 movq %rbx, PCPU(common_tss) + TSS_RSP0 357 movq %rbx, PCPU(rsp0) 358 359#if JG 360 cmpl $0,PCPU(private_tss) /* don't have to reload if */ 361 je 3f /* already using the common TSS */ 362 363 /* JG? */ 364 subl %ebx,%ebx /* unmark use of private tss */ 365 366 /* 367 * Get the address of the common TSS descriptor for the ltr. 368 * There is no way to get the address of a segment-accessed variable 369 * so we store a self-referential pointer at the base of the per-cpu 370 * data area and add the appropriate offset. 371 */ 372 /* JG movl? */ 373 movq $gd_common_tssd, %rdi 374 /* JG name for "%gs:0"? */ 375 addq %gs:0, %rdi 376 377 /* 378 * Move the correct TSS descriptor into the GDT slot, then reload 379 * ltr. 380 */ 3812: 382 /* JG */ 383 movl %ebx,PCPU(private_tss) /* mark/unmark private tss */ 384 movq PCPU(tss_gdt), %rbx /* entry in GDT */ 385 movq 0(%rdi), %rax 386 movq %rax, 0(%rbx) 387 movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */ 388 ltr %si 389#endif 390 3913: 392 /* 393 * Restore the user %gs and %fs 394 */ 395 movq PCB_FSBASE(%rdx),%r9 396 cmpq PCPU(user_fs),%r9 397 je 4f 398 movq %rdx,%r10 399 movq %r9,PCPU(user_fs) 400 movl $MSR_FSBASE,%ecx 401 movl PCB_FSBASE(%r10),%eax 402 movl PCB_FSBASE+4(%r10),%edx 403 wrmsr 404 movq %r10,%rdx 4054: 406 movq PCB_GSBASE(%rdx),%r9 407 cmpq PCPU(user_gs),%r9 408 je 5f 409 movq %rdx,%r10 410 movq %r9,PCPU(user_gs) 411 movl $MSR_KGSBASE,%ecx /* later swapgs moves it to GSBASE */ 412 movl PCB_GSBASE(%r10),%eax 413 movl PCB_GSBASE+4(%r10),%edx 414 wrmsr 415 movq %r10,%rdx 4165: 417 418 /* 419 * Restore general registers. 420 */ 421 movq PCB_RBX(%rdx), %rbx 422 movq PCB_RSP(%rdx), %rsp 423 movq PCB_RBP(%rdx), %rbp 424 movq PCB_R12(%rdx), %r12 425 movq PCB_R13(%rdx), %r13 426 movq PCB_R14(%rdx), %r14 427 movq PCB_R15(%rdx), %r15 428 movq PCB_RIP(%rdx), %rax 429 movq %rax, (%rsp) 430 431#if JG 432 /* 433 * Restore the user LDT if we have one 434 */ 435 cmpl $0, PCB_USERLDT(%edx) 436 jnz 1f 437 movl _default_ldt,%eax 438 cmpl PCPU(currentldt),%eax 439 je 2f 440 lldt _default_ldt 441 movl %eax,PCPU(currentldt) 442 jmp 2f 4431: pushl %edx 444 call set_user_ldt 445 popl %edx 4462: 447#endif 448#if JG 449 /* 450 * Restore the user TLS if we have one 451 */ 452 pushl %edx 453 call set_user_TLS 454 popl %edx 455#endif 456 457 /* 458 * Restore the DEBUG register state if necessary. 459 */ 460 movq PCB_FLAGS(%rdx),%rax 461 andq $PCB_DBREGS,%rax 462 jz 1f /* no, skip over */ 463 movq PCB_DR6(%rdx),%rax /* yes, do the restore */ 464 movq %rax,%dr6 465 movq PCB_DR3(%rdx),%rax 466 movq %rax,%dr3 467 movq PCB_DR2(%rdx),%rax 468 movq %rax,%dr2 469 movq PCB_DR1(%rdx),%rax 470 movq %rax,%dr1 471 movq PCB_DR0(%rdx),%rax 472 movq %rax,%dr0 473 movq %dr7,%rax /* load dr7 so as not to disturb */ 474 /* JG correct value? */ 475 andq $0x0000fc00,%rax /* reserved bits */ 476 /* JG we've got more registers on x86_64 */ 477 pushq %rbx 478 movq PCB_DR7(%rdx),%rbx 479 /* JG correct value? */ 480 andq $~0x0000fc00,%rbx 481 orq %rbx,%rax 482 popq %rbx 483 movq %rax,%dr7 4841: 485 486 CHECKNZ((%rsp), %r9) 487 ret 488 489/* 490 * savectx(struct pcb *pcb) 491 * 492 * Update pcb, saving current processor state. 493 */ 494ENTRY(savectx) 495 /* fetch PCB */ 496 /* JG use %rdi instead of %rcx everywhere? */ 497 movq %rdi,%rcx 498 499 /* caller's return address - child won't execute this routine */ 500 movq (%rsp),%rax 501 movq %rax,PCB_RIP(%rcx) 502 503 movq %cr3,%rax 504 movq %rax,PCB_CR3(%rcx) 505 506 movq %rbx,PCB_RBX(%rcx) 507 movq %rsp,PCB_RSP(%rcx) 508 movq %rbp,PCB_RBP(%rcx) 509 movq %r12,PCB_R12(%rcx) 510 movq %r13,PCB_R13(%rcx) 511 movq %r14,PCB_R14(%rcx) 512 movq %r15,PCB_R15(%rcx) 513 514#if 1 515 /* 516 * If npxthread == NULL, then the npx h/w state is irrelevant and the 517 * state had better already be in the pcb. This is true for forks 518 * but not for dumps (the old book-keeping with FP flags in the pcb 519 * always lost for dumps because the dump pcb has 0 flags). 520 * 521 * If npxthread != NULL, then we have to save the npx h/w state to 522 * npxthread's pcb and copy it to the requested pcb, or save to the 523 * requested pcb and reload. Copying is easier because we would 524 * have to handle h/w bugs for reloading. We used to lose the 525 * parent's npx state for forks by forgetting to reload. 526 */ 527 movq PCPU(npxthread),%rax 528 testq %rax,%rax 529 jz 1f 530 531 pushq %rcx /* target pcb */ 532 movq TD_SAVEFPU(%rax),%rax /* originating savefpu area */ 533 pushq %rax 534 535 movq %rax,%rdi 536 call npxsave 537 538 popq %rax 539 popq %rcx 540 541 movq $PCB_SAVEFPU_SIZE,%rdx 542 leaq PCB_SAVEFPU(%rcx),%rcx 543 movq %rcx,%rsi 544 movq %rax,%rdi 545 call bcopy 546#endif 547 5481: 549 CHECKNZ((%rsp), %r9) 550 ret 551 552/* 553 * cpu_idle_restore() (current thread in %rax on entry) (one-time execution) 554 * 555 * Don't bother setting up any regs other than %rbp so backtraces 556 * don't die. This restore function is used to bootstrap into the 557 * cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for 558 * switching. 559 * 560 * Clear TDF_RUNNING in old thread only after we've cleaned up %cr3. 561 * 562 * If we are an AP we have to call ap_init() before jumping to 563 * cpu_idle(). ap_init() will synchronize with the BP and finish 564 * setting up various ncpu-dependant globaldata fields. This may 565 * happen on UP as well as SMP if we happen to be simulating multiple 566 * cpus. 567 */ 568ENTRY(cpu_idle_restore) 569 /* cli */ 570 movq KPML4phys,%rcx 571 /* JG xor? */ 572 movl $0,%ebp 573 /* JG push RBP? */ 574 pushq $0 575 movq %rcx,%cr3 576 andl $~TDF_RUNNING,TD_FLAGS(%rbx) 577 orl $TDF_RUNNING,TD_FLAGS(%rax) 578#ifdef SMP 579 cmpl $0,PCPU(cpuid) 580 je 1f 581 call ap_init 5821: 583#endif 584 /* 585 * ap_init can decide to enable interrupts early, but otherwise, or if 586 * we are UP, do it here. 587 */ 588 sti 589 jmp cpu_idle 590 591/* 592 * cpu_kthread_restore() (current thread is %rax on entry) (one-time execution) 593 * 594 * Don't bother setting up any regs other then %rbp so backtraces 595 * don't die. This restore function is used to bootstrap into an 596 * LWKT based kernel thread only. cpu_lwkt_switch() will be used 597 * after this. 598 * 599 * Since all of our context is on the stack we are reentrant and 600 * we can release our critical section and enable interrupts early. 601 */ 602ENTRY(cpu_kthread_restore) 603 sti 604 movq KPML4phys,%rcx 605 movq TD_PCB(%rax),%rdx 606 /* JG "movq $0, %rbp"? "xorq %rbp, %rbp"? */ 607 movl $0,%ebp 608 movq %rcx,%cr3 609 /* rax and rbx come from the switchout code */ 610 andl $~TDF_RUNNING,TD_FLAGS(%rbx) 611 orl $TDF_RUNNING,TD_FLAGS(%rax) 612 decl TD_CRITCOUNT(%rax) 613 movq PCB_R12(%rdx),%rdi /* argument to RBX function */ 614 movq PCB_RBX(%rdx),%rax /* thread function */ 615 /* note: top of stack return address inherited by function */ 616 CHECKNZ(%rax, %r9) 617 jmp *%rax 618 619/* 620 * cpu_lwkt_switch(struct thread *) 621 * 622 * Standard LWKT switching function. Only non-scratch registers are 623 * saved and we don't bother with the MMU state or anything else. 624 * 625 * This function is always called while in a critical section. 626 * 627 * There is a one-instruction window where curthread is the new 628 * thread but %rsp still points to the old thread's stack, but 629 * we are protected by a critical section so it is ok. 630 * 631 * YYY BGL, SPL 632 */ 633ENTRY(cpu_lwkt_switch) 634 pushq %rbp /* JG note: GDB hacked to locate ebp relative to td_sp */ 635 /* JG we've got more registers on x86_64 */ 636 pushq %rbx 637 movq PCPU(curthread),%rbx 638 pushq %r12 639 pushq %r13 640 pushq %r14 641 pushq %r15 642 pushfq 643 644#if 1 645 /* 646 * Save the FP state if we have used the FP. Note that calling 647 * npxsave will NULL out PCPU(npxthread). 648 * 649 * We have to deal with the FP state for LWKT threads in case they 650 * happen to get preempted or block while doing an optimized 651 * bzero/bcopy/memcpy. 652 */ 653 cmpq %rbx,PCPU(npxthread) 654 jne 1f 655 movq %rdi,%r12 /* save %rdi. %r12 is callee-saved */ 656 movq TD_SAVEFPU(%rbx),%rdi 657 call npxsave /* do it in a big C function */ 658 movq %r12,%rdi /* restore %rdi */ 6591: 660#endif 661 662 movq %rdi,%rax /* switch to this thread */ 663 pushq $cpu_lwkt_restore 664 movq %rsp,TD_SP(%rbx) 665 movq %rax,PCPU(curthread) 666 movq TD_SP(%rax),%rsp 667 668 /* 669 * %rax contains new thread, %rbx contains old thread. 670 */ 671 CHECKNZ((%rsp), %r9) 672 ret 673 674/* 675 * cpu_lwkt_restore() (current thread in %rax on entry) 676 * 677 * Standard LWKT restore function. This function is always called 678 * while in a critical section. 679 * 680 * Warning: due to preemption the restore function can be used to 681 * 'return' to the original thread. Interrupt disablement must be 682 * protected through the switch so we cannot run splz here. 683 * 684 * YYY we theoretically do not need to load KPML4phys into cr3, but if 685 * so we need a way to detect when the PTD we are using is being 686 * deleted due to a process exiting. 687 */ 688ENTRY(cpu_lwkt_restore) 689 movq KPML4phys,%rcx /* YYY borrow but beware desched/cpuchg/exit */ 690 movq %cr3,%rdx 691 cmpq %rcx,%rdx 692 je 1f 693 movq %rcx,%cr3 6941: 695 andl $~TDF_RUNNING,TD_FLAGS(%rbx) 696 orl $TDF_RUNNING,TD_FLAGS(%rax) 697 popfq 698 popq %r15 699 popq %r14 700 popq %r13 701 popq %r12 702 popq %rbx 703 popq %rbp 704 ret 705