/*
 * Copyright (c) 2003,2004,2008 The DragonFly Project.  All rights reserved.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 */

//#include "use_npx.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/segments.h>

#include <machine/pmap.h>
#if JG
#include <machine_base/apic/apicreg.h>
#endif
#include <machine/lock.h>

#include "assym.s"

#if defined(SMP)
#define MPLOCKED	lock ;
#else
#define MPLOCKED
#endif

	.data

	.globl	panic

#if defined(SWTCH_OPTIM_STATS)
	.globl	swtch_optim_stats, tlb_flush_count
swtch_optim_stats:	.long	0	/* number of _swtch_optims */
tlb_flush_count:	.long	0
#endif

	.text


/*
 * cpu_heavy_switch(struct thread *next_thread)
 *
 * Switch from the current thread to a new thread.  This entry
 * is normally called via the thread->td_switch function, and will
 * only be called when the current thread is a heavy weight process.
 *
 * Some instructions have been reordered to reduce pipeline stalls.
 *
 * YYY disable interrupts once giant is removed.
 */
ENTRY(cpu_heavy_switch)
	/*
	 * Save RIP, RSP and callee-saved registers (RBX, RBP, R12-R15).
	 */
	movq	PCPU(curthread),%rcx
	/* On top of the stack is the return address. */
	movq	(%rsp),%rax			/* (reorder optimization) */
	movq	TD_PCB(%rcx),%rdx		/* RDX = PCB */
	movq	%rax,PCB_RIP(%rdx)		/* return PC may be modified */
	movq	%rbx,PCB_RBX(%rdx)
	movq	%rsp,PCB_RSP(%rdx)
	movq	%rbp,PCB_RBP(%rdx)
	movq	%r12,PCB_R12(%rdx)
	movq	%r13,PCB_R13(%rdx)
	movq	%r14,PCB_R14(%rdx)
	movq	%r15,PCB_R15(%rdx)

	/*
	 * Clear the cpu bit in the pmap active mask.  The restore
	 * function will set the bit in the pmap active mask.
	 *
	 * Special case: when switching between threads sharing the
	 * same vmspace if we avoid clearing the bit we do not have
	 * to reload %cr3 (if we clear the bit we could race page
	 * table ops done by other threads and would have to reload
	 * %cr3, because those ops will not know to IPI us).
	 */
	movq	%rcx,%rbx			/* RBX = oldthread */
	movq	TD_LWP(%rcx),%rcx		/* RCX = oldlwp */
	movq	TD_LWP(%rdi),%r13		/* R13 = newlwp */
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = oldvmspace */
	testq	%r13,%r13			/* might not be a heavy */
	jz	1f
	cmpq	LWP_VMSPACE(%r13),%rcx		/* same vmspace? */
	je	2f
1:
	movl	PCPU(cpuid), %eax
	MPLOCKED btrl	%eax, VM_PMAP+PM_ACTIVE(%rcx)
2:

	/*
	 * Push the LWKT switch restore function, which resumes a heavy
	 * weight process.  Note that the LWKT switcher is based on
	 * TD_SP, while the heavy weight process switcher is based on
	 * PCB_RSP.  TD_SP is usually two quadwords pushed relative to
	 * PCB_RSP.  We push the flags for later restore by cpu_heavy_restore.
	 */
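	/*
	 * After the two pushes below, TD_SP points at (lowest address
	 * first):
	 *
	 *	TD_SP[0]	&cpu_heavy_restore  (popped by the LWKT 'ret')
	 *	TD_SP[8]	saved rflags        (popped by the popfq in
	 *					     cpu_heavy_restore)
	 *	TD_SP[16]	the frame that PCB_RSP points at
	 */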
	pushfq
	movq	$cpu_heavy_restore, %rax
	pushq	%rax
	movq	%rsp,TD_SP(%rbx)

	/*
	 * Save debug regs if necessary
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	%dr7,%rax			/* yes, do the save */
	movq	%rax,PCB_DR7(%rdx)
	/* JG correct value? */
	andq	$0x0000fc00, %rax		/* disable all watchpoints */
	movq	%rax,%dr7
	movq	%dr6,%rax
	movq	%rax,PCB_DR6(%rdx)
	movq	%dr3,%rax
	movq	%rax,PCB_DR3(%rdx)
	movq	%dr2,%rax
	movq	%rax,PCB_DR2(%rdx)
	movq	%dr1,%rax
	movq	%rax,PCB_DR1(%rdx)
	movq	%dr0,%rax
	movq	%rax,PCB_DR0(%rdx)
1:

#if 1
	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 */
	cmpq	%rbx,PCPU(npxthread)
	jne	1f
	movq	%rdi,%r12		/* save %rdi.  %r12 is callee-saved */
	movq	TD_SAVEFPU(%rbx),%rdi
	call	npxsave			/* do it in a big C function */
	movq	%r12,%rdi		/* restore %rdi */
1:
#endif

	/*
	 * Switch to the next thread, which was passed as an argument
	 * to cpu_heavy_switch().  The argument is in %rdi.
	 * Set the current thread, load the stack pointer,
	 * and 'ret' into the switch-restore function.
	 *
	 * The switch restore function expects the new thread to be in %rax
	 * and the old one to be in %rbx.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	movq	%rdi,%rax		/* RAX = newtd, RBX = oldtd */
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret

/*
 * cpu_exit_switch(struct thread *next)
 *
 * The switch function is changed to this when a thread is going away
 * for good.  We have to ensure that the MMU state is not cached, and
 * we don't bother saving the existing thread state before switching.
 *
 * At this point we are in a critical section and this cpu owns the
 * thread's token, which serves as an interlock until the switchout is
 * complete.
 */
ENTRY(cpu_exit_switch)
	/*
	 * Get us out of the vmspace
	 */
	movq	KPML4phys,%rcx
	movq	%cr3,%rax
	cmpq	%rcx,%rax
	je	1f
	/* JG no increment of statistics counters? see cpu_heavy_restore */
	movq	%rcx,%cr3
1:
	movq	PCPU(curthread),%rbx

	/*
	 * If this is a process/lwp, deactivate the pmap after we've
	 * switched it out.
	 */
	movq	TD_LWP(%rbx),%rcx
	testq	%rcx,%rcx
	jz	2f
	movl	PCPU(cpuid), %eax
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = vmspace */
	MPLOCKED btrl	%eax, VM_PMAP+PM_ACTIVE(%rcx)
2:
	/*
	 * Switch to the next thread.  RET into the restore function, which
	 * expects the new thread in RAX and the old in RBX.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	movq	%rdi,%rax
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret

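/*
 * All of the *_restore functions below are entered the same way: the
 * switchout code loads TD_SP of the new thread into %rsp and executes
 * 'ret', which pops the restore function's address off the new thread's
 * stack with the new thread in %rax and the old thread in %rbx.  Each
 * restore function clears TDF_RUNNING in the old thread and sets it in
 * the new one once any %cr3 cleanup is done.
 */
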
/*
 * cpu_heavy_restore()	(current thread in %rax on entry)
 *
 * Restore the thread after an LWKT switch.  This entry is normally
 * called via the LWKT switch restore function, which was pulled
 * off the thread stack and jumped to.
 *
 * This entry is only called if the thread was previously saved
 * using cpu_heavy_switch() (the heavy weight process thread switcher),
 * or when a new process is initially scheduled.  The first thing we
 * do is clear the TDF_RUNNING bit in the old thread and set it in the
 * new thread.
 *
 * NOTE: The lwp may be in any state, not necessarily LSRUN, because
 * a preemption switch may interrupt the process and then return via
 * cpu_heavy_restore.
 *
 * YYY theoretically we do not have to restore everything here, a lot
 * of this junk can wait until we return to usermode.  But for now
 * we restore everything.
 *
 * YYY the PCB crap is really crap, it makes startup a bitch because
 * we can't switch away.
 *
 * YYY note: spl check is done in mi_switch when it splx()'s.
 */

ENTRY(cpu_heavy_restore)
	popfq
	movq	TD_LWP(%rax),%rcx

#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif
	/*
	 * Tell the pmap that our cpu is using the VMSPACE now.  We cannot
	 * safely test/reload %cr3 until after we have set the bit in the
	 * pmap (remember, we do not hold the MP lock in the switch code).
	 *
	 * Also note that when switching between two lwps sharing the
	 * same vmspace we have already avoided clearing the cpu bit
	 * in pm_active.  If we had cleared it other cpus would not know
	 * to IPI us and we would have to unconditionally reload %cr3.
	 *
	 * Also note that if the pmap is undergoing an atomic inval/mod
	 * that is unaware that our cpu has been added to it we have to
	 * wait for it to complete before we can continue.
	 */
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = vmspace */
	movl	PCPU(cpumask), %esi
	MPLOCKED orl	%esi, VM_PMAP+PM_ACTIVE(%rcx)
#ifdef SMP
	testl	$CPUMASK_LOCK,VM_PMAP+PM_ACTIVE(%rcx)
	jz	1f
	pushq	%rax
	movq	%rcx,%rdi
	call	pmap_interlock_wait		/* pmap_interlock_wait(vm) */
	popq	%rax
1:
#endif

	/*
	 * Restore the MMU address space.  If it is the same as the last
	 * thread we don't have to invalidate the tlb (i.e. reload cr3).
	 * YYY which naturally also means that the PM_ACTIVE bit had better
	 * already have been set before we set it above, check? YYY
	 */
	movq	TD_PCB(%rax),%rdx		/* RDX = PCB */
	movq	%cr3,%rsi
	movq	PCB_CR3(%rdx),%rcx
	cmpq	%rsi,%rcx
	je	4f
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
	movq	%rcx,%cr3
4:
	/*
	 * Clear TDF_RUNNING flag in old thread only after cleaning up
	 * %cr3.  The target thread is already protected by being TDF_RUNQ
	 * so setting TDF_RUNNING isn't as big a deal.
	 */
	andl	$~TDF_RUNNING,TD_FLAGS(%rbx)
	orl	$TDF_RUNNING,TD_FLAGS(%rax)

	/*
	 * Deal with the PCB extension, restore the private tss
	 */
	movq	PCB_EXT(%rdx),%rdi	/* check for a PCB extension */
	/* JG cheaper than "movq $1,%rbx", right? */
	/* JG what's that magic value $1? */
	movl	$1,%ebx			/* maybe mark use of a private tss */
	testq	%rdi,%rdi
#if JG
	jnz	2f
#endif

	/*
	 * Going back to the common_tss.  We may need to update TSS_RSP0
	 * which sets the top of the supervisor stack when entering from
	 * usermode.  The PCB is at the top of the stack but we need another
	 * 16 bytes to take vm86 into account.
	 */
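	/*
	 * %rdx still holds the PCB pointer, so the two movq's below set
	 * both the TSS ring-0 stack pointer and PCPU(rsp0) to pcb - 16.
	 */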
	leaq	-16(%rdx),%rbx
	movq	%rbx, PCPU(common_tss) + TSS_RSP0
	movq	%rbx, PCPU(rsp0)

#if JG
	cmpl	$0,PCPU(private_tss)	/* don't have to reload if	*/
	je	3f			/* already using the common TSS */

	/* JG? */
	subl	%ebx,%ebx		/* unmark use of private tss */

	/*
	 * Get the address of the common TSS descriptor for the ltr.
	 * There is no way to get the address of a segment-accessed variable
	 * so we store a self-referential pointer at the base of the per-cpu
	 * data area and add the appropriate offset.
	 */
	/* JG movl? */
	movq	$gd_common_tssd, %rdi
	/* JG name for "%gs:0"? */
	addq	%gs:0, %rdi

	/*
	 * Move the correct TSS descriptor into the GDT slot, then reload
	 * ltr.
	 */
2:
	/* JG */
	movl	%ebx,PCPU(private_tss)		/* mark/unmark private tss */
	movq	PCPU(tss_gdt), %rbx		/* entry in GDT */
	movq	0(%rdi), %rax
	movq	%rax, 0(%rbx)
	movl	$GPROC0_SEL*8, %esi		/* GSEL(entry, SEL_KPL) */
	ltr	%si
#endif

3:
	/*
	 * Restore the user %gs and %fs
	 */
	movq	PCB_FSBASE(%rdx),%r9
	cmpq	PCPU(user_fs),%r9
	je	4f
	movq	%rdx,%r10
	movq	%r9,PCPU(user_fs)
	movl	$MSR_FSBASE,%ecx
	movl	PCB_FSBASE(%r10),%eax
	movl	PCB_FSBASE+4(%r10),%edx
	wrmsr
	movq	%r10,%rdx
4:
	movq	PCB_GSBASE(%rdx),%r9
	cmpq	PCPU(user_gs),%r9
	je	5f
	movq	%rdx,%r10
	movq	%r9,PCPU(user_gs)
	movl	$MSR_KGSBASE,%ecx	/* later swapgs moves it to GSBASE */
	movl	PCB_GSBASE(%r10),%eax
	movl	PCB_GSBASE+4(%r10),%edx
	wrmsr
	movq	%r10,%rdx
5:

	/*
	 * Restore general registers.
	 */
	movq	PCB_RBX(%rdx), %rbx
	movq	PCB_RSP(%rdx), %rsp
	movq	PCB_RBP(%rdx), %rbp
	movq	PCB_R12(%rdx), %r12
	movq	PCB_R13(%rdx), %r13
	movq	PCB_R14(%rdx), %r14
	movq	PCB_R15(%rdx), %r15
	movq	PCB_RIP(%rdx), %rax
	movq	%rax, (%rsp)

#if JG
	/*
	 * Restore the user LDT if we have one
	 */
	cmpl	$0, PCB_USERLDT(%edx)
	jnz	1f
	movl	_default_ldt,%eax
	cmpl	PCPU(currentldt),%eax
	je	2f
	lldt	_default_ldt
	movl	%eax,PCPU(currentldt)
	jmp	2f
1:	pushl	%edx
	call	set_user_ldt
	popl	%edx
2:
#endif
#if JG
	/*
	 * Restore the user TLS if we have one
	 */
	pushl	%edx
	call	set_user_TLS
	popl	%edx
#endif

	/*
	 * Restore the DEBUG register state if necessary.
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	PCB_DR6(%rdx),%rax		/* yes, do the restore */
	movq	%rax,%dr6
	movq	PCB_DR3(%rdx),%rax
	movq	%rax,%dr3
	movq	PCB_DR2(%rdx),%rax
	movq	%rax,%dr2
	movq	PCB_DR1(%rdx),%rax
	movq	%rax,%dr1
	movq	PCB_DR0(%rdx),%rax
	movq	%rax,%dr0
	movq	%dr7,%rax		/* load dr7 so as not to disturb */
	/* JG correct value? */
	andq	$0x0000fc00,%rax	/* reserved bits */
	/* JG we've got more registers on x86_64 */
	pushq	%rbx
	movq	PCB_DR7(%rdx),%rbx
	/* JG correct value? */
	andq	$~0x0000fc00,%rbx
	orq	%rbx,%rax
	popq	%rbx
	movq	%rax,%dr7
1:
	ret

/*
 * savectx(struct pcb *pcb)
 *
 * Update pcb, saving current processor state.
 */
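/*
 * savectx() does not switch away; it only snapshots the current cpu
 * state into the supplied pcb.  The comments below note its use on the
 * fork path (where the child never returns through this routine) and
 * for dumps, where the live FPU state may have to be copied out of
 * npxthread's save area.
 */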
ENTRY(savectx)
	/* fetch PCB */
	/* JG use %rdi instead of %rcx everywhere? */
	movq	%rdi,%rcx

	/* caller's return address - child won't execute this routine */
	movq	(%rsp),%rax
	movq	%rax,PCB_RIP(%rcx)

	movq	%cr3,%rax
	movq	%rax,PCB_CR3(%rcx)

	movq	%rbx,PCB_RBX(%rcx)
	movq	%rsp,PCB_RSP(%rcx)
	movq	%rbp,PCB_RBP(%rcx)
	movq	%r12,PCB_R12(%rcx)
	movq	%r13,PCB_R13(%rcx)
	movq	%r14,PCB_R14(%rcx)
	movq	%r15,PCB_R15(%rcx)

#if 1
	/*
	 * If npxthread == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxthread != NULL, then we have to save the npx h/w state to
	 * npxthread's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	movq	PCPU(npxthread),%rax
	testq	%rax,%rax
	jz	1f

	pushq	%rcx			/* target pcb */
	movq	TD_SAVEFPU(%rax),%rax	/* originating savefpu area */
	pushq	%rax

	movq	%rax,%rdi
	call	npxsave

	popq	%rax
	popq	%rcx

	movq	$PCB_SAVEFPU_SIZE,%rdx
	leaq	PCB_SAVEFPU(%rcx),%rcx
	movq	%rcx,%rsi
	movq	%rax,%rdi
	call	bcopy
#endif

1:
	ret

/*
 * cpu_idle_restore()	(current thread in %rax on entry) (one-time execution)
 *
 * Don't bother setting up any regs other than %rbp so backtraces
 * don't die.  This restore function is used to bootstrap into the
 * cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 * switching.
 *
 * Clear TDF_RUNNING in old thread only after we've cleaned up %cr3.
 *
 * If we are an AP we have to call ap_init() before jumping to
 * cpu_idle().  ap_init() will synchronize with the BP and finish
 * setting up various ncpu-dependent globaldata fields.  This may
 * happen on UP as well as SMP if we happen to be simulating multiple
 * cpus.
 */
ENTRY(cpu_idle_restore)
	/* cli */
	movq	KPML4phys,%rcx
	/* JG xor? */
	movq	$0,%rbp
	/* JG push RBP? */
	pushq	$0
	movq	%rcx,%cr3
	andl	$~TDF_RUNNING,TD_FLAGS(%rbx)
	orl	$TDF_RUNNING,TD_FLAGS(%rax)
#ifdef SMP
	cmpl	$0,PCPU(cpuid)
	je	1f
	call	ap_init
1:
#endif
	/*
	 * ap_init can decide to enable interrupts early, but otherwise, or if
	 * we are UP, do it here.
	 */
	sti
	jmp	cpu_idle

/*
 * cpu_kthread_restore()	(current thread in %rax on entry) (one-time execution)
 *
 * Don't bother setting up any regs other than %rbp so backtraces
 * don't die.  This restore function is used to bootstrap into an
 * LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 * after this.
 *
 * Since all of our context is on the stack we are reentrant and
 * we can release our critical section and enable interrupts early.
 */
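/*
 * The thread's entry function is taken from PCB_RBX and its argument
 * from PCB_R12 (see the final jmp below); the argument is handed to
 * the function in %rdi.
 */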
ENTRY(cpu_kthread_restore)
	sti
	movq	KPML4phys,%rcx
	movq	TD_PCB(%rax),%rdx
	/* JG "movq $0, %rbp"? "xorq %rbp, %rbp"? */
	movq	$0,%rbp
	movq	%rcx,%cr3
	/* rax and rbx come from the switchout code */
	andl	$~TDF_RUNNING,TD_FLAGS(%rbx)
	orl	$TDF_RUNNING,TD_FLAGS(%rax)
	decl	TD_CRITCOUNT(%rax)
	movq	PCB_R12(%rdx),%rdi	/* argument to RBX function */
	movq	PCB_RBX(%rdx),%rax	/* thread function */
	/* note: top of stack return address inherited by function */
	jmp	*%rax

/*
 * cpu_lwkt_switch(struct thread *)
 *
 * Standard LWKT switching function.  Only non-scratch registers are
 * saved and we don't bother with the MMU state or anything else.
 *
 * This function is always called while in a critical section.
 *
 * There is a one-instruction window where curthread is the new
 * thread but %rsp still points to the old thread's stack, but
 * we are protected by a critical section so it is ok.
 */
ENTRY(cpu_lwkt_switch)
	pushq	%rbp	/* JG note: GDB hacked to locate ebp rel to td_sp */
	pushq	%rbx
	movq	PCPU(curthread),%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15
	pushfq

#if 1
	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 *
	 * We have to deal with the FP state for LWKT threads in case they
	 * happen to get preempted or block while doing an optimized
	 * bzero/bcopy/memcpy.
	 */
	cmpq	%rbx,PCPU(npxthread)
	jne	1f
	movq	%rdi,%r12		/* save %rdi.  %r12 is callee-saved */
	movq	TD_SAVEFPU(%rbx),%rdi
	call	npxsave			/* do it in a big C function */
	movq	%r12,%rdi		/* restore %rdi */
1:
#endif

	movq	%rdi,%rax		/* switch to this thread */
	pushq	$cpu_lwkt_restore
	movq	%rsp,TD_SP(%rbx)
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp

	/*
	 * %rax contains new thread, %rbx contains old thread.
	 */
	ret

/*
 * cpu_lwkt_restore()	(current thread in %rax on entry)
 *
 * Standard LWKT restore function.  This function is always called
 * while in a critical section.
 *
 * Warning: due to preemption the restore function can be used to
 * 'return' to the original thread.  Interrupt disablement must be
 * protected through the switch so we cannot run splz here.
 *
 * YYY we theoretically do not need to load KPML4phys into cr3, but if
 * so we need a way to detect when the PTD we are using is being
 * deleted due to a process exiting.
 */
ENTRY(cpu_lwkt_restore)
	movq	KPML4phys,%rcx	/* YYY borrow but beware desched/cpuchg/exit */
	movq	%cr3,%rdx
	cmpq	%rcx,%rdx
	je	1f
	movq	%rcx,%cr3
1:
	andl	$~TDF_RUNNING,TD_FLAGS(%rbx)
	orl	$TDF_RUNNING,TD_FLAGS(%rax)
	popfq
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp
	ret