/*
 * Copyright (c) 2003,2004,2008 The DragonFly Project.  All rights reserved.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 */

//#include "use_npx.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/segments.h>

#include <machine/pmap.h>
#if JG
#include <machine_base/apic/apicreg.h>
#endif
#include <machine/lock.h>

#include "assym.s"

#if defined(SMP)
#define MPLOCKED	lock ;
#else
#define MPLOCKED
#endif

	.data

	.globl	panic
	.globl	lwkt_switch_return

#if defined(SWTCH_OPTIM_STATS)
	.globl	swtch_optim_stats, tlb_flush_count
swtch_optim_stats:	.long	0	/* number of _swtch_optims */
tlb_flush_count:	.long	0
#endif

	.text


/*
 * cpu_heavy_switch(struct thread *next_thread)
 *
 *	Switch from the current thread to a new thread.  This entry
 *	is normally called via the thread->td_switch function, and will
 *	only be called when the current thread is a heavy weight process.
 *
 *	Some instructions have been reordered to reduce pipeline stalls.
 *
 *	YYY disable interrupts once giant is removed.
 */
ENTRY(cpu_heavy_switch)
	/*
	 * Save RIP, RSP and callee-saved registers (RBX, RBP, R12-R15).
	 */
	movq	PCPU(curthread),%rcx
	/* On top of the stack is the return address. */
	movq	(%rsp),%rax			/* (reorder optimization) */
	movq	TD_PCB(%rcx),%rdx		/* RDX = PCB */
	movq	%rax,PCB_RIP(%rdx)		/* return PC may be modified */
	movq	%rbx,PCB_RBX(%rdx)
	movq	%rsp,PCB_RSP(%rdx)
	movq	%rbp,PCB_RBP(%rdx)
	movq	%r12,PCB_R12(%rdx)
	movq	%r13,PCB_R13(%rdx)
	movq	%r14,PCB_R14(%rdx)
	movq	%r15,PCB_R15(%rdx)

	/*
	 * Clear the cpu bit in the pmap active mask.  The restore
	 * function will set the bit in the pmap active mask.
	 *
	 * Special case: when switching between threads sharing the
	 * same vmspace if we avoid clearing the bit we do not have
	 * to reload %cr3 (if we clear the bit we could race page
	 * table ops done by other threads and would have to reload
	 * %cr3, because those ops will not know to IPI us).
	 */
	movq	%rcx,%rbx			/* RBX = oldthread */
	movq	TD_LWP(%rcx),%rcx		/* RCX = oldlwp */
	movq	TD_LWP(%rdi),%r13		/* R13 = newlwp */
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = oldvmspace */
	testq	%r13,%r13			/* might not be a heavy */
	jz	1f
	cmpq	LWP_VMSPACE(%r13),%rcx		/* same vmspace? */
	je	2f
1:
	movslq	PCPU(cpuid), %rax
	MPLOCKED btrq	%rax, VM_PMAP+PM_ACTIVE(%rcx)
2:

	/*
	 * Push the LWKT switch restore function, which resumes a heavy
	 * weight process.  Note that the LWKT switcher is based on
	 * TD_SP, while the heavy weight process switcher is based on
	 * PCB_RSP.  TD_SP is usually two ints pushed relative to
	 * PCB_RSP.  We push the flags for later restore by cpu_heavy_restore.
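	 *
	 * After the pushfq and the pushq of $cpu_heavy_restore below, a
	 * minimal sketch of the saved frame (an illustration only, assuming
	 * the usual downward-growing x86_64 stack):
	 *
	 *	TD_SP   -> &cpu_heavy_restore	('ret' target for the switch-in)
	 *		   saved rflags		(restored by cpu_heavy_restore's popfq)
	 *	PCB_RSP -> caller's return PC	(also saved in PCB_RIP above)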
	 */
	pushfq
	cli
	movq	$cpu_heavy_restore, %rax
	pushq	%rax
	movq	%rsp,TD_SP(%rbx)

	/*
	 * Save debug regs if necessary
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	%dr7,%rax			/* yes, do the save */
	movq	%rax,PCB_DR7(%rdx)
	/* JG correct value? */
	andq	$0x0000fc00, %rax		/* disable all watchpoints */
	movq	%rax,%dr7
	movq	%dr6,%rax
	movq	%rax,PCB_DR6(%rdx)
	movq	%dr3,%rax
	movq	%rax,PCB_DR3(%rdx)
	movq	%dr2,%rax
	movq	%rax,PCB_DR2(%rdx)
	movq	%dr1,%rax
	movq	%rax,PCB_DR1(%rdx)
	movq	%dr0,%rax
	movq	%rax,PCB_DR0(%rdx)
1:

#if 1
	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 */
	cmpq	%rbx,PCPU(npxthread)
	jne	1f
	movq	%rdi,%r12		/* save %rdi.  %r12 is callee-saved */
	movq	TD_SAVEFPU(%rbx),%rdi
	call	npxsave			/* do it in a big C function */
	movq	%r12,%rdi		/* restore %rdi */
1:
#endif

	/*
	 * Switch to the next thread, which was passed as an argument
	 * to cpu_heavy_switch().  The argument is in %rdi.
	 * Set the current thread, load the stack pointer,
	 * and 'ret' into the switch-restore function.
	 *
	 * The switch restore function expects the new thread to be in %rax
	 * and the old one to be in %rbx.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	movq	%rdi,%rax		/* RAX = newtd, RBX = oldtd */
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret

/*
 * cpu_exit_switch(struct thread *next)
 *
 *	The switch function is changed to this when a thread is going away
 *	for good.  We have to ensure that the MMU state is not cached, and
 *	we don't bother saving the existing thread state before switching.
 *
 *	At this point we are in a critical section and this cpu owns the
 *	thread's token, which serves as an interlock until the switchout is
 *	complete.
 */
ENTRY(cpu_exit_switch)
	/*
	 * Get us out of the vmspace
	 */
	movq	KPML4phys,%rcx
	movq	%cr3,%rax
#if 1
	cmpq	%rcx,%rax
	je	1f
#endif
	/* JG no increment of statistics counters? see cpu_heavy_restore */
	movq	%rcx,%cr3
1:
	movq	PCPU(curthread),%rbx

	/*
	 * If this is a process/lwp, deactivate the pmap after we've
	 * switched it out.
	 */
	movq	TD_LWP(%rbx),%rcx
	testq	%rcx,%rcx
	jz	2f
	movslq	PCPU(cpuid), %rax
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = vmspace */
	MPLOCKED btrq	%rax, VM_PMAP+PM_ACTIVE(%rcx)
2:
	/*
	 * Switch to the next thread.  RET into the restore function, which
	 * expects the new thread in RAX and the old in RBX.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	cli
	movq	%rdi,%rax
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret

/*
 * cpu_heavy_restore()	(current thread in %rax on entry, old thread in %rbx)
 *
 *	Restore the thread after an LWKT switch.  This entry is normally
 *	called via the LWKT switch restore function, which was pulled
 *	off the thread stack and jumped to.
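 *
 *	As a rough C-level sketch of the entry contract (illustration
 *	only): the switch-out code left TD_SP pointing at the pair
 *	{ &cpu_heavy_restore, saved rflags }, then executed
 *	'movq TD_SP(%rax),%rsp; ret', landing here with
 *
 *		%rax = newtd	(the thread being restored)
 *		%rbx = oldtd	(handed back for lwkt_switch_return)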
 *
 *	This entry is only called if the thread was previously saved
 *	using cpu_heavy_switch() (the heavy weight process thread switcher),
 *	or when a new process is initially scheduled.
 *
 *	NOTE: The lwp may be in any state, not necessarily LSRUN, because
 *	a preemption switch may interrupt the process and then return via
 *	cpu_heavy_restore.
 *
 *	YYY theoretically we do not have to restore everything here, a lot
 *	of this junk can wait until we return to usermode.  But for now
 *	we restore everything.
 *
 *	YYY the PCB crap is really crap, it makes startup a bitch because
 *	we can't switch away.
 *
 *	YYY note: spl check is done in mi_switch when it splx()'s.
 */

ENTRY(cpu_heavy_restore)
	movq	TD_PCB(%rax),%rdx		/* RDX = PCB */
	movq	%rdx, PCPU(common_tss) + TSS_RSP0
	popfq

#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif
	/*
	 * Tell the pmap that our cpu is using the VMSPACE now.  We cannot
	 * safely test/reload %cr3 until after we have set the bit in the
	 * pmap.
	 *
	 * We must do an interlocked test of the CPUMASK_BIT at the same
	 * time.  If found to be set we will have to wait for it to clear
	 * and then do a forced reload of %cr3 (even if the value matches).
	 *
	 * XXX When switching between two LWPs sharing the same vmspace
	 *     the cpu_heavy_switch() code currently avoids clearing the
	 *     cpu bit in PM_ACTIVE.  So if the bit is already set we can
	 *     avoid checking for the interlock via CPUMASK_BIT.  We
	 *     currently do not perform this optimization.
	 *
	 * %rax is needed for the cmpxchgq so store newthread in %r12
	 * temporarily.
	 */
	movq	TD_LWP(%rax),%rcx
	movq	LWP_VMSPACE(%rcx),%rcx		/* RCX = vmspace */
#ifdef SMP
	movq	%rax,%r12			/* save newthread ptr */
1:
	movq	VM_PMAP+PM_ACTIVE(%rcx),%rax	/* old contents */
	movq	PCPU(cpumask),%rsi		/* new contents */
	orq	%rax,%rsi
	MPLOCKED cmpxchgq %rsi,VM_PMAP+PM_ACTIVE(%rcx)
	jnz	1b

	/*
	 * Check CPUMASK_BIT
	 */
	btq	$CPUMASK_BIT,%rax	/* test interlock */
	jnc	1f

#if 0
	movq	TD_PCB(%r12),%rdx	/* XXX debugging unconditional */
	movq	PCB_CR3(%rdx),%rdx	/*     reloading of %cr3 */
	movq	%rdx,%cr3
#endif

	movq	%rcx,%rdi		/* (found to be set) */
	call	pmap_interlock_wait	/* pmap_interlock_wait(%rdi:vm) */

	/*
	 * Need unconditional load cr3
	 */
	movq	%r12,%rax
	movq	TD_PCB(%rax),%rdx	/* RDX = PCB */
	movq	PCB_CR3(%rdx),%rcx	/* RCX = desired CR3 */
	jmp	2f			/* unconditional reload */
1:
	movq	%r12,%rax		/* restore RAX = newthread */
#else
	movq	PCPU(cpumask),%rsi
	orq	%rsi,VM_PMAP+PM_ACTIVE(%rcx)
#endif
	/*
	 * Restore the MMU address space.  If it is the same as the last
	 * thread we don't have to invalidate the tlb (i.e. reload cr3).
	 * YYY which naturally also means that the PM_ACTIVE bit had better
	 * already have been set before we set it above, check? YYY
	 */
	movq	TD_PCB(%rax),%rdx		/* RDX = PCB */
	movq	%cr3,%rsi			/* RSI = current CR3 */
	movq	PCB_CR3(%rdx),%rcx		/* RCX = desired CR3 */
	cmpq	%rsi,%rcx
	je	4f
2:
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
	movq	%rcx,%cr3
4:

	/*
	 * NOTE: %rbx is the previous thread and %rax is the new thread.
	 *	 %rbx is retained throughout so we can return it.
	 *
	 *	 lwkt_switch[_return] is responsible for handling TDF_RUNNING.
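	 *
	 *	 For reference, a C-level sketch of the SMP PM_ACTIVE
	 *	 interlock loop above (hypothetical helper names, an
	 *	 illustration only, assuming CPUMASK_LOCK is the mask
	 *	 form of CPUMASK_BIT):
	 *
	 *		do {
	 *			old = pmap->pm_active;
	 *		} while (!atomic_cmpset_long(&pmap->pm_active, old,
	 *					     old | mycpu->gd_cpumask));
	 *		if (old & CPUMASK_LOCK)
	 *			pmap_interlock_wait(vm);  (then force %cr3 reload)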
	 */

	/*
	 * Deal with the PCB extension, restore the private tss
	 */
	movq	PCB_EXT(%rdx),%rdi	/* check for a PCB extension */
	movq	$1,%rcx			/* maybe mark use of a private tss */
	testq	%rdi,%rdi
#if JG
	jnz	2f
#endif

	/*
	 * Going back to the common_tss.  We may need to update TSS_RSP0
	 * which sets the top of the supervisor stack when entering from
	 * usermode.  The PCB is at the top of the stack but we need another
	 * 16 bytes to take vm86 into account.
	 */
	movq	%rdx,%rcx
	/*leaq	-TF_SIZE(%rdx),%rcx*/
	movq	%rcx, PCPU(common_tss) + TSS_RSP0

#if JG
	cmpl	$0,PCPU(private_tss)	/* don't have to reload if      */
	je	3f			/* already using the common TSS */

	/* JG? */
	subq	%rcx,%rcx		/* unmark use of private tss */

	/*
	 * Get the address of the common TSS descriptor for the ltr.
	 * There is no way to get the address of a segment-accessed variable
	 * so we store a self-referential pointer at the base of the per-cpu
	 * data area and add the appropriate offset.
	 */
	/* JG movl? */
	movq	$gd_common_tssd, %rdi
	/* JG name for "%gs:0"? */
	addq	%gs:0, %rdi

	/*
	 * Move the correct TSS descriptor into the GDT slot, then reload
	 * ltr.
	 */
2:
	/* JG */
	movl	%rcx,PCPU(private_tss)	/* mark/unmark private tss */
	movq	PCPU(tss_gdt), %rbx	/* entry in GDT */
	movq	0(%rdi), %rax
	movq	%rax, 0(%rbx)
	movl	$GPROC0_SEL*8, %esi	/* GSEL(entry, SEL_KPL) */
	ltr	%si
#endif

3:
	/*
	 * Restore the user %fs and %gs
	 */
	movq	PCB_FSBASE(%rdx),%r9
	cmpq	PCPU(user_fs),%r9
	je	4f
	movq	%rdx,%r10
	movq	%r9,PCPU(user_fs)
	movl	$MSR_FSBASE,%ecx
	movl	PCB_FSBASE(%r10),%eax
	movl	PCB_FSBASE+4(%r10),%edx
	wrmsr
	movq	%r10,%rdx
4:
	movq	PCB_GSBASE(%rdx),%r9
	cmpq	PCPU(user_gs),%r9
	je	5f
	movq	%rdx,%r10
	movq	%r9,PCPU(user_gs)
	movl	$MSR_KGSBASE,%ecx	/* later swapgs moves it to GSBASE */
	movl	PCB_GSBASE(%r10),%eax
	movl	PCB_GSBASE+4(%r10),%edx
	wrmsr
	movq	%r10,%rdx
5:

	/*
	 * Restore general registers.  %rbx is restored later.
	 */
	movq	PCB_RSP(%rdx), %rsp
	movq	PCB_RBP(%rdx), %rbp
	movq	PCB_R12(%rdx), %r12
	movq	PCB_R13(%rdx), %r13
	movq	PCB_R14(%rdx), %r14
	movq	PCB_R15(%rdx), %r15
	movq	PCB_RIP(%rdx), %rax
	movq	%rax, (%rsp)
	movw	$KDSEL,%ax
	movw	%ax,%es

#if JG
	/*
	 * Restore the user LDT if we have one
	 */
	cmpl	$0, PCB_USERLDT(%edx)
	jnz	1f
	movl	_default_ldt,%eax
	cmpl	PCPU(currentldt),%eax
	je	2f
	lldt	_default_ldt
	movl	%eax,PCPU(currentldt)
	jmp	2f
1:	pushl	%edx
	call	set_user_ldt
	popl	%edx
2:
#endif
#if JG
	/*
	 * Restore the user TLS if we have one
	 */
	pushl	%edx
	call	set_user_TLS
	popl	%edx
#endif

	/*
	 * Restore the DEBUG register state if necessary.
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	PCB_DR6(%rdx),%rax		/* yes, do the restore */
	movq	%rax,%dr6
	movq	PCB_DR3(%rdx),%rax
	movq	%rax,%dr3
	movq	PCB_DR2(%rdx),%rax
	movq	%rax,%dr2
	movq	PCB_DR1(%rdx),%rax
	movq	%rax,%dr1
	movq	PCB_DR0(%rdx),%rax
	movq	%rax,%dr0
	movq	%dr7,%rax		/* load dr7 so as not to disturb */
	/* JG correct value? */
	andq	$0x0000fc00,%rax	/* reserved bits */
	/* JG we've got more registers on x86_64 */
	movq	PCB_DR7(%rdx),%rcx
	/* JG correct value? */
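	/*
	 * A rough C-level sketch of the dr7 merge below (illustration
	 * only): keep the reserved bits currently in the register and
	 * take everything else from the PCB:
	 *
	 *	dr7 = (dr7 & 0x0000fc00) | (pcb_dr7 & ~0x0000fc00);
	 */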
	andq	$~0x0000fc00,%rcx
	orq	%rcx,%rax
	movq	%rax,%dr7

	/*
	 * Clear the QUICKRET flag when restoring a user process context
	 * so we don't try to do a quick syscall return.
	 */
1:
	andl	$~RQF_QUICKRET,PCPU(reqflags)
	movq	%rbx,%rax
	movq	PCB_RBX(%rdx),%rbx
	ret

/*
 * savectx(struct pcb *pcb)
 *
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	/* fetch PCB */
	/* JG use %rdi instead of %rcx everywhere? */
	movq	%rdi,%rcx

	/* caller's return address - child won't execute this routine */
	movq	(%rsp),%rax
	movq	%rax,PCB_RIP(%rcx)

	movq	%cr3,%rax
	movq	%rax,PCB_CR3(%rcx)

	movq	%rbx,PCB_RBX(%rcx)
	movq	%rsp,PCB_RSP(%rcx)
	movq	%rbp,PCB_RBP(%rcx)
	movq	%r12,PCB_R12(%rcx)
	movq	%r13,PCB_R13(%rcx)
	movq	%r14,PCB_R14(%rcx)
	movq	%r15,PCB_R15(%rcx)

#if 1
	/*
	 * If npxthread == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxthread != NULL, then we have to save the npx h/w state to
	 * npxthread's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	movq	PCPU(npxthread),%rax
	testq	%rax,%rax
	jz	1f

	pushq	%rcx			/* target pcb */
	movq	TD_SAVEFPU(%rax),%rax	/* originating savefpu area */
	pushq	%rax

	movq	%rax,%rdi
	call	npxsave

	popq	%rax
	popq	%rcx

	movq	$PCB_SAVEFPU_SIZE,%rdx
	leaq	PCB_SAVEFPU(%rcx),%rcx
	movq	%rcx,%rsi
	movq	%rax,%rdi
	call	bcopy
#endif

1:
	ret

/*
 * cpu_idle_restore()	(current thread in %rax on entry) (one-time execution)
 *
 *	Don't bother setting up any regs other than %rbp so backtraces
 *	don't die.  This restore function is used to bootstrap into the
 *	cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 *	switching.
 *
 *	Clear TDF_RUNNING in old thread only after we've cleaned up %cr3.
 *	This only occurs during system boot so no special handling is
 *	required for migration.
 *
 *	If we are an AP we have to call ap_init() before jumping to
 *	cpu_idle().  ap_init() will synchronize with the BP and finish
 *	setting up various ncpu-dependent globaldata fields.  This may
 *	happen on UP as well as SMP if we happen to be simulating multiple
 *	cpus.
 */
ENTRY(cpu_idle_restore)
	/* cli */
	movq	KPML4phys,%rcx
	/* JG xor? */
	movq	$0,%rbp
	/* JG push RBP? */
	pushq	$0
	movq	%rcx,%cr3
	andl	$~TDF_RUNNING,TD_FLAGS(%rbx)
	orl	$TDF_RUNNING,TD_FLAGS(%rax)	/* manual, no switch_return */
#ifdef SMP
	cmpl	$0,PCPU(cpuid)
	je	1f
	call	ap_init
1:
#endif
	/*
	 * ap_init can decide to enable interrupts early, but otherwise, or if
	 * we are UP, do it here.
	 */
	sti
	jmp	cpu_idle

/*
 * cpu_kthread_restore()	(current thread is %rax on entry, previous is %rbx)
 *				(one-time execution)
 *
 *	Don't bother setting up any regs other than %rbp so backtraces
 *	don't die.  This restore function is used to bootstrap into an
 *	LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 *	after this.
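 *
 *	A rough sketch of the bootstrap contract (assuming the PCB was
 *	primed by the kernel-thread creation path): PCB_RBX holds the
 *	thread function and PCB_R12 its argument, so the tail of this
 *	routine behaves roughly like
 *
 *		((void (*)(void *))pcb->pcb_rbx)((void *)pcb->pcb_r12);
 *
 *	via the 'jmp *%rax' below.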
 *
 *	Because this switch target does not 'return' to lwkt_switch()
 *	we have to call lwkt_switch_return(otd) to clean up otd.
 *	otd is in %rbx.
 *
 *	Since all of our context is on the stack we are reentrant and
 *	we can release our critical section and enable interrupts early.
 */
ENTRY(cpu_kthread_restore)
	sti
	movq	KPML4phys,%rcx
	movq	TD_PCB(%rax),%r13
	xorq	%rbp,%rbp
	movq	%rcx,%cr3

	/*
	 * rax and rbx come from the switchout code.  Call
	 * lwkt_switch_return(otd).
	 *
	 * NOTE: unlike i386, %rsi and %rdi are not call-saved regs.
	 */
	pushq	%rax
	movq	%rbx,%rdi
	call	lwkt_switch_return
	popq	%rax
	decl	TD_CRITCOUNT(%rax)
	movq	PCB_R12(%r13),%rdi	/* argument to RBX function */
	movq	PCB_RBX(%r13),%rax	/* thread function */
	/* note: top of stack return address inherited by function */
	jmp	*%rax

/*
 * cpu_lwkt_switch(struct thread *)
 *
 *	Standard LWKT switching function.  Only non-scratch registers are
 *	saved and we don't bother with the MMU state or anything else.
 *
 *	This function is always called while in a critical section.
 *
 *	There is a one-instruction window where curthread is the new
 *	thread but %rsp still points to the old thread's stack, but
 *	we are protected by a critical section so it is ok.
 */
ENTRY(cpu_lwkt_switch)
	pushq	%rbp	/* JG note: GDB hacked to locate ebp rel to td_sp */
	pushq	%rbx
	movq	PCPU(curthread),%rbx	/* becomes old thread in restore */
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15
	pushfq
	cli

#if 1
	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 *
	 * We have to deal with the FP state for LWKT threads in case they
	 * happen to get preempted or block while doing an optimized
	 * bzero/bcopy/memcpy.
	 */
	cmpq	%rbx,PCPU(npxthread)
	jne	1f
	movq	%rdi,%r12		/* save %rdi.  %r12 is callee-saved */
	movq	TD_SAVEFPU(%rbx),%rdi
	call	npxsave			/* do it in a big C function */
	movq	%r12,%rdi		/* restore %rdi */
1:
#endif

	movq	%rdi,%rax		/* switch to this thread */
	pushq	$cpu_lwkt_restore
	movq	%rsp,TD_SP(%rbx)
	/*
	 * %rax contains new thread, %rbx contains old thread.
	 */
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret

/*
 * cpu_lwkt_restore()	(current thread in %rax on entry)
 *
 *	Standard LWKT restore function.  This function is always called
 *	while in a critical section.
 *
 *	Warning: due to preemption the restore function can be used to
 *	'return' to the original thread.  Interrupt disablement must be
 *	protected through the switch so we cannot run splz here.
 *
 *	YYY we theoretically do not need to load KPML4phys into cr3, but if
 *	so we need a way to detect when the PTD we are using is being
 *	deleted due to a process exiting.
 */
ENTRY(cpu_lwkt_restore)
	movq	KPML4phys,%rcx	/* YYY borrow but beware desched/cpuchg/exit */
	movq	%cr3,%rdx
#if 1
	cmpq	%rcx,%rdx
	je	1f
#endif
	movq	%rcx,%cr3
1:
	/*
	 * Safety, clear RSP0 in the tss so it isn't pointing at the
	 * previous thread's kstack (if a heavy weight user thread).
	 * RSP0 should only be used in ring 3 transitions and kernel
	 * threads run in ring 0 so there should be none.
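	 *
	 * For reference, the frame cpu_lwkt_switch() left at TD_SP is,
	 * from the top of the stack down: &cpu_lwkt_restore (already
	 * consumed by the 'ret' that brought us here), rflags, %r15,
	 * %r14, %r13, %r12, %rbx, %rbp; the popfq/popq sequence below
	 * unwinds it.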
	 */
	xorq	%rdx,%rdx
	movq	%rdx, PCPU(common_tss) + TSS_RSP0

	/*
	 * NOTE: %rbx is the previous thread and %rax is the new thread.
	 *	 %rbx is retained throughout so we can return it.
	 *
	 *	 lwkt_switch[_return] is responsible for handling TDF_RUNNING.
	 */
	movq	%rbx,%rax
	popfq
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp
	ret