/*
 * Copyright (c) 2003,2004,2008 The DragonFly Project.  All rights reserved.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 */

//#include "use_npx.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/segments.h>

#include <machine/pmap.h>
#include <machine/lock.h>
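
/*
 * CHECKNZ(expr, scratch_reg) is a debug assertion: it loads expr into
 * scratch_reg and executes int $3 (breakpoint) if the value is zero,
 * otherwise it falls through at the local 7: label.  It is used below to
 * verify that a switch/restore target is non-NULL before we 'ret' or
 * 'jmp' through it.
 */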
#define	CHECKNZ(expr, scratch_reg)		\
	movq expr, scratch_reg; testq scratch_reg, scratch_reg; jnz 7f; int $3; 7:

#include "assym.s"

#define	MPLOCKED	lock ;

	.data

	.globl	panic
	.globl	lwkt_switch_return

#if defined(SWTCH_OPTIM_STATS)
	.globl	swtch_optim_stats, tlb_flush_count
swtch_optim_stats:	.long	0		/* number of _swtch_optims */
tlb_flush_count:	.long	0
#endif

	.text


/*
 * cpu_heavy_switch(struct thread *next_thread)
 *
 *	Switch from the current thread to a new thread.  This entry
 *	is normally called via the thread->td_switch function, and will
 *	only be called when the current thread is a heavy weight process.
 *
 *	Some instructions have been reordered to reduce pipeline stalls.
 *
 *	YYY disable interrupts once giant is removed.
 */
ENTRY(cpu_heavy_switch)
	/*
	 * Save RIP, RSP and callee-saved registers (RBX, RBP, R12-R15).
	 */
	movq	PCPU(curthread),%rcx
	/* On top of the stack is the return address. */
	movq	(%rsp),%rax			/* (reorder optimization) */
	movq	TD_PCB(%rcx),%rdx		/* RDX = PCB */
	movq	%rax,PCB_RIP(%rdx)		/* return PC may be modified */
	movq	%rbx,PCB_RBX(%rdx)
	movq	%rsp,PCB_RSP(%rdx)
	movq	%rbp,PCB_RBP(%rdx)
	movq	%r12,PCB_R12(%rdx)
	movq	%r13,PCB_R13(%rdx)
	movq	%r14,PCB_R14(%rdx)
	movq	%r15,PCB_R15(%rdx)

	/*
	 * Clear the cpu bit in the pmap active mask.  The restore
	 * function will set the bit in the pmap active mask.
	 *
	 * Special case: when switching between threads sharing the
	 * same vmspace if we avoid clearing the bit we do not have
	 * to reload %cr3 (if we clear the bit we could race page
	 * table ops done by other threads and would have to reload
	 * %cr3, because those ops will not know to IPI us).
	 */
	movq	%rcx,%rbx			/* RBX = oldthread */
	movq	TD_LWP(%rcx),%rcx		/* RCX = oldlwp */
	movq	TD_LWP(%rdi),%r13		/* R13 = newlwp */
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = oldvmspace */
	testq	%r13,%r13			/* might not be a heavy */
	jz	1f
	cmpq	LWP_VMSPACE(%r13),%rcx		/* same vmspace? */
	je	2f
1:
	movq	PCPU(other_cpus)+0, %rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+0(%rcx)
	movq	PCPU(other_cpus)+8, %rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+8(%rcx)
	movq	PCPU(other_cpus)+16, %rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+16(%rcx)
	movq	PCPU(other_cpus)+24, %rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+24(%rcx)
2:

	/*
	 * Push the LWKT switch restore function, which resumes a heavy
	 * weight process.  Note that the LWKT switcher is based on
	 * TD_SP, while the heavy weight process switcher is based on
	 * PCB_RSP.  TD_SP is usually two quadwords pushed relative to
	 * PCB_RSP.  We push the flags for later restore by cpu_heavy_restore.
	 */
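	/*
	 * Sketch of the stack image built by the pushfq/pushq below,
	 * relative to the PCB_RSP we saved above:
	 *
	 *	PCB_RSP ->	caller's return address
	 *			saved RFLAGS		(pushfq)
	 *	TD_SP   ->	&cpu_heavy_restore	(pushq)
	 *
	 * The generic LWKT switcher 'ret's through TD_SP and lands in
	 * cpu_heavy_restore, which pops the flags and later reloads
	 * %rsp from PCB_RSP.
	 */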
	pushfq
	movq	$cpu_heavy_restore, %rax
	pushq	%rax
	movq	%rsp,TD_SP(%rbx)

	/*
	 * Save debug regs if necessary
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	%dr7,%rax			/* yes, do the save */
	movq	%rax,PCB_DR7(%rdx)
	/* JG correct value? */
	andq	$0x0000fc00, %rax		/* disable all watchpoints */
	movq	%rax,%dr7
	movq	%dr6,%rax
	movq	%rax,PCB_DR6(%rdx)
	movq	%dr3,%rax
	movq	%rax,PCB_DR3(%rdx)
	movq	%dr2,%rax
	movq	%rax,PCB_DR2(%rdx)
	movq	%dr1,%rax
	movq	%rax,PCB_DR1(%rdx)
	movq	%dr0,%rax
	movq	%rax,PCB_DR0(%rdx)
1:

#if 1
	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 */
	cmpq	%rbx,PCPU(npxthread)
	jne	1f
	movq	%rdi,%r12		/* save %rdi.  %r12 is callee-saved */
	movq	TD_SAVEFPU(%rbx),%rdi
	call	npxsave			/* do it in a big C function */
	movq	%r12,%rdi		/* restore %rdi */
1:
#endif

	/*
	 * Switch to the next thread, which was passed as an argument
	 * to cpu_heavy_switch().  The argument is in %rdi.
	 * Set the current thread, load the stack pointer,
	 * and 'ret' into the switch-restore function.
	 *
	 * The switch restore function expects the new thread to be in %rax
	 * and the old one to be in %rbx.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	movq	%rdi,%rax		/* RAX = newtd, RBX = oldtd */
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	CHECKNZ((%rsp), %r9)
	ret

/*
 * cpu_exit_switch(struct thread *next)
 *
 *	The switch function is changed to this when a thread is going away
 *	for good.  We have to ensure that the MMU state is not cached, and
 *	we don't bother saving the existing thread state before switching.
 *
 *	At this point we are in a critical section and this cpu owns the
 *	thread's token, which serves as an interlock until the switchout is
 *	complete.
 */
ENTRY(cpu_exit_switch)
	/*
	 * Get us out of the vmspace
	 */
#if 0
	movq	KPML4phys,%rcx
	movq	%cr3,%rax
	cmpq	%rcx,%rax
	je	1f
	/* JG no increment of statistics counters? see cpu_heavy_restore */
	movq	%rcx,%cr3
1:
#endif
	movq	PCPU(curthread),%rbx

	/*
	 * If this is a process/lwp, deactivate the pmap after we've
	 * switched it out.
	 */
	movq	TD_LWP(%rbx),%rcx
	testq	%rcx,%rcx
	jz	2f
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = vmspace */
	movq	PCPU(other_cpus)+0, %rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+0(%rcx)
	movq	PCPU(other_cpus)+8, %rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+8(%rcx)
	movq	PCPU(other_cpus)+16, %rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+16(%rcx)
	movq	PCPU(other_cpus)+24, %rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+24(%rcx)
2:
	/*
	 * Switch to the next thread.  RET into the restore function, which
	 * expects the new thread in RAX and the old in RBX.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	movq	%rdi,%rax
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	CHECKNZ((%rsp), %r9)
	ret

/*
 * cpu_heavy_restore()	(current thread in %rax on entry, %rbx is old thread)
 *
 *	Restore the thread after an LWKT switch.  This entry is normally
 *	called via the LWKT switch restore function, which was pulled
 *	off the thread stack and jumped to.
 *
 *	This entry is only called if the thread was previously saved
 *	using cpu_heavy_switch() (the heavy weight process thread switcher),
 *	or when a new process is initially scheduled.
 *
 *	NOTE: The lwp may be in any state, not necessarily LSRUN, because
 *	a preemption switch may interrupt the process and then return via
 *	cpu_heavy_restore.
 *
 *	YYY theoretically we do not have to restore everything here, a lot
 *	of this junk can wait until we return to usermode.  But for now
 *	we restore everything.
 *
 *	YYY the PCB crap is really crap, it makes startup a bitch because
 *	we can't switch away.
 *
 *	YYY note: spl check is done in mi_switch when it splx()'s.
 */

ENTRY(cpu_heavy_restore)
	popfq
	movq	TD_PCB(%rax),%rdx		/* RDX = PCB */

#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif
	/*
	 * Tell the pmap that our cpu is using the VMSPACE now.  We cannot
	 * safely test/reload %cr3 until after we have set the bit in the
	 * pmap (remember, we do not hold the MP lock in the switch code).
	 */
	movq	TD_LWP(%rax),%rcx
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = vmspace */

	movq	PCPU(cpumask)+0, %rsi
	MPLOCKED orq	%rsi, VM_PMAP+PM_ACTIVE+0(%rcx)
	movq	PCPU(cpumask)+8, %rsi
	MPLOCKED orq	%rsi, VM_PMAP+PM_ACTIVE+8(%rcx)
	movq	PCPU(cpumask)+16, %rsi
	MPLOCKED orq	%rsi, VM_PMAP+PM_ACTIVE+16(%rcx)
	movq	PCPU(cpumask)+24, %rsi
	MPLOCKED orq	%rsi, VM_PMAP+PM_ACTIVE+24(%rcx)

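	/*
	 * If another cpu holds the pmap interlock (CPULOCK_EXCL set in the
	 * PM_ACTIVE_LOCK field), wait for it to be released in
	 * pmap_interlock_wait() before continuing with the switch-in.
	 */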
	movl	VM_PMAP+PM_ACTIVE_LOCK(%rcx),%esi
	testl	$CPULOCK_EXCL,%esi
	jz	1f

	movq	%rax,%r12		/* save newthread ptr */
	movq	%rcx,%rdi		/* (found to be set) */
	call	pmap_interlock_wait	/* pmap_interlock_wait(%rdi:vm) */
	movq	%r12,%rax
	movq	TD_PCB(%rax),%rdx	/* RDX = PCB */
1:
	/*
	 * Restore the MMU address space.  If it is the same as the last
	 * thread we don't have to invalidate the tlb (i.e. reload cr3).
	 * YYY which naturally also means that the PM_ACTIVE bit had better
	 * already have been set before we set it above, check? YYY
	 */
#if 0
	movq	%cr3,%rsi
	movq	PCB_CR3(%rdx),%rcx
	cmpq	%rsi,%rcx
	je	4f
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
	movq	%rcx,%cr3
4:
#endif
	/*
	 * NOTE: %rbx is the previous thread and %rax is the new thread.
	 *	 %rbx is retained throughout so we can return it.
	 *
	 *	 lwkt_switch[_return] is responsible for handling TDF_RUNNING.
	 */

#if 0
	/*
	 * Deal with the PCB extension, restore the private tss
	 */
	movq	PCB_EXT(%rdx),%rdi	/* check for a PCB extension */
	movq	$1,%rcx			/* maybe mark use of a private tss */
	testq	%rdi,%rdi
#if JG
	jnz	2f
#endif

	/* JG
	 * Going back to the common_tss.  We may need to update TSS_ESP0
	 * which sets the top of the supervisor stack when entering from
	 * usermode.  The PCB is at the top of the stack but we need another
	 * 16 bytes to take vm86 into account.
	 */
	leaq	-16(%rdx),%rcx
	movq	%rcx, PCPU(common_tss) + TSS_RSP0
	movq	%rcx, PCPU(rsp0)

#if JG
	cmpl	$0,PCPU(private_tss)	/* don't have to reload if	*/
	je	3f			/* already using the common TSS */

	/* JG? */
	subq	%rcx,%rcx		/* unmark use of private tss */

	/*
	 * Get the address of the common TSS descriptor for the ltr.
	 * There is no way to get the address of a segment-accessed variable
	 * so we store a self-referential pointer at the base of the per-cpu
	 * data area and add the appropriate offset.
	 */
	/* JG movl? */
	movq	$gd_common_tssd, %rdi
	/* JG name for "%gs:0"? */
	addq	%gs:0, %rdi

	/*
	 * Move the correct TSS descriptor into the GDT slot, then reload
	 * ltr.
	 */
2:
	/* JG */
	movl	%rcx,PCPU(private_tss)		/* mark/unmark private tss */
	movq	PCPU(tss_gdt), %rcx		/* entry in GDT */
	movq	0(%rdi), %rax
	movq	%rax, 0(%rcx)
	movl	$GPROC0_SEL*8, %esi		/* GSEL(entry, SEL_KPL) */
	ltr	%si
#endif

3:
#endif
#if 0
	/*
	 * Restore the user %gs and %fs
	 */
	movq	PCB_FSBASE(%rdx),%r9
	cmpq	PCPU(user_fs),%r9
	je	4f
	movq	%rdx,%r10
	movq	%r9,PCPU(user_fs)
	movl	$MSR_FSBASE,%ecx
	movl	PCB_FSBASE(%r10),%eax
	movl	PCB_FSBASE+4(%r10),%edx
	wrmsr
	movq	%r10,%rdx
4:
	movq	PCB_GSBASE(%rdx),%r9
	cmpq	PCPU(user_gs),%r9
	je	5f
	movq	%rdx,%r10
	movq	%r9,PCPU(user_gs)
	movl	$MSR_KGSBASE,%ecx	/* later swapgs moves it to GSBASE */
	movl	PCB_GSBASE(%r10),%eax
	movl	PCB_GSBASE+4(%r10),%edx
	wrmsr
	movq	%r10,%rdx
5:
#endif

	/*
	 * Restore general registers.  %rbx is restored later.
	 */
	movq	PCB_RSP(%rdx), %rsp
	movq	PCB_RBP(%rdx), %rbp
	movq	PCB_R12(%rdx), %r12
	movq	PCB_R13(%rdx), %r13
	movq	PCB_R14(%rdx), %r14
	movq	PCB_R15(%rdx), %r15
	movq	PCB_RIP(%rdx), %rax
	movq	%rax, (%rsp)

#if 0
	/*
	 * Restore the user LDT if we have one
	 */
	cmpl	$0, PCB_USERLDT(%edx)
	jnz	1f
	movl	_default_ldt,%eax
	cmpl	PCPU(currentldt),%eax
	je	2f
	lldt	_default_ldt
	movl	%eax,PCPU(currentldt)
	jmp	2f
1:	pushl	%edx
	call	set_user_ldt
	popl	%edx
2:
#endif
#if 0
	/*
	 * Restore the user TLS if we have one
	 */
	pushl	%edx
	call	set_user_TLS
	popl	%edx
#endif

	/*
	 * Restore the DEBUG register state if necessary.
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	PCB_DR6(%rdx),%rax		/* yes, do the restore */
	movq	%rax,%dr6
	movq	PCB_DR3(%rdx),%rax
	movq	%rax,%dr3
	movq	PCB_DR2(%rdx),%rax
	movq	%rax,%dr2
	movq	PCB_DR1(%rdx),%rax
	movq	%rax,%dr1
	movq	PCB_DR0(%rdx),%rax
	movq	%rax,%dr0
	movq	%dr7,%rax		/* load dr7 so as not to disturb */
	/* JG correct value? */
	andq	$0x0000fc00,%rax	/* reserved bits */
	/* JG we've got more registers on x86_64 */
	movq	PCB_DR7(%rdx),%rcx
	/* JG correct value? */
	andq	$~0x0000fc00,%rcx
	orq	%rcx,%rax
	movq	%rax,%dr7
1:
	movq	%rbx,%rax
	movq	PCB_RBX(%rdx),%rbx

	CHECKNZ((%rsp), %r9)
	ret

/*
 * savectx(struct pcb *pcb)
 *
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	/* fetch PCB */
	/* JG use %rdi instead of %rcx everywhere? */
	movq	%rdi,%rcx

	/* caller's return address - child won't execute this routine */
	movq	(%rsp),%rax
	movq	%rax,PCB_RIP(%rcx)
	movq	%rbx,PCB_RBX(%rcx)
	movq	%rsp,PCB_RSP(%rcx)
	movq	%rbp,PCB_RBP(%rcx)
	movq	%r12,PCB_R12(%rcx)
	movq	%r13,PCB_R13(%rcx)
	movq	%r14,PCB_R14(%rcx)
	movq	%r15,PCB_R15(%rcx)

#if 1
	/*
	 * If npxthread == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxthread != NULL, then we have to save the npx h/w state to
	 * npxthread's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	movq	PCPU(npxthread),%rax
	testq	%rax,%rax
	jz	1f

	pushq	%rcx			/* target pcb */
	movq	TD_SAVEFPU(%rax),%rax	/* originating savefpu area */
	pushq	%rax

	movq	%rax,%rdi
	call	npxsave

	popq	%rax
	popq	%rcx

	movq	$PCB_SAVEFPU_SIZE,%rdx
	leaq	PCB_SAVEFPU(%rcx),%rcx
	movq	%rcx,%rsi
	movq	%rax,%rdi
	call	bcopy
#endif

1:
	CHECKNZ((%rsp), %r9)
	ret

/*
 * cpu_idle_restore()	(current thread in %rax on entry) (one-time execution)
 *			(old thread is %rbx on entry)
 *
 *	Don't bother setting up any regs other than %rbp so backtraces
 *	don't die.  This restore function is used to bootstrap into the
 *	cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 *	switching.
 *
 *	Clear TDF_RUNNING in old thread only after we've cleaned up %cr3.
 *	This only occurs during system boot so no special handling is
 *	required for migration.
 *
 *	If we are an AP we have to call ap_init() before jumping to
 *	cpu_idle().  ap_init() will synchronize with the BP and finish
 *	setting up various ncpu-dependent globaldata fields.  This may
 *	happen on UP as well as SMP if we happen to be simulating multiple
 *	cpus.
 */
ENTRY(cpu_idle_restore)
	/* cli */
	/* JG xor? */
	movl	$0,%ebp
	/* JG push RBP? */
	pushq	$0
	cmpl	$0,PCPU(cpuid)
	je	1f
	andl	$~TDF_RUNNING,TD_FLAGS(%rbx)
	orl	$TDF_RUNNING,TD_FLAGS(%rax)	/* manual, no switch_return */
	call	ap_init
	/* sti */
	jmp	cpu_idle

	/*
	 * cpu 0's idle thread entry for the first time must use normal
	 * lwkt_switch_return() semantics or a pending cpu migration on
	 * thread0 will deadlock.
	 */
1:
	pushq	%rax
	movq	%rbx,%rdi
	call	lwkt_switch_return
	popq	%rax
	jmp	cpu_idle

/*
 * cpu_kthread_restore()	(current thread is %rax on entry) (one-time execution)
 *				(old thread is %rbx on entry)
 *
 *	Don't bother setting up any regs other than %rbp so backtraces
 *	don't die.  This restore function is used to bootstrap into an
 *	LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 *	after this.
 *
 *	Because this switch target does not 'return' to lwkt_switch()
 *	we have to call lwkt_switch_return(otd) to clean up otd.
 *	otd is in %rbx.
 *
 *	Since all of our context is on the stack we are reentrant and
 *	we can release our critical section and enable interrupts early.
 */
ENTRY(cpu_kthread_restore)
	/*sti*/
	movq	TD_PCB(%rax),%r13
	movq	$0,%rbp

	/*
	 * rax and rbx come from the switchout code.  Call
	 * lwkt_switch_return(otd).
	 *
	 * NOTE: unlike i386, %rsi and %rdi are not call-saved regs.
	 */
	pushq	%rax
	movq	%rbx,%rdi
	call	lwkt_switch_return
	popq	%rax
	decl	TD_CRITCOUNT(%rax)
	movq	PCB_R12(%r13),%rdi	/* argument to RBX function */
	movq	PCB_RBX(%r13),%rax	/* thread function */
	/* note: top of stack return address inherited by function */
	CHECKNZ(%rax, %r9)
	jmp	*%rax

/*
 * cpu_lwkt_switch(struct thread *)
 *
 *	Standard LWKT switching function.  Only non-scratch registers are
 *	saved and we don't bother with the MMU state or anything else.
 *
 *	This function is always called while in a critical section.
 *
 *	There is a one-instruction window where curthread is the new
 *	thread but %rsp still points to the old thread's stack, but
 *	we are protected by a critical section so it is ok.
 *
 *	YYY BGL, SPL
 */
ENTRY(cpu_lwkt_switch)
	pushq	%rbp	/* JG note: GDB hacked to locate ebp relative to td_sp */
	/* JG we've got more registers on x86_64 */
	pushq	%rbx
	movq	PCPU(curthread),%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15
	pushfq

#if 1
	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 *
	 * We have to deal with the FP state for LWKT threads in case they
	 * happen to get preempted or block while doing an optimized
	 * bzero/bcopy/memcpy.
	 */
	cmpq	%rbx,PCPU(npxthread)
	jne	1f
	movq	%rdi,%r12		/* save %rdi.  %r12 is callee-saved */
	movq	TD_SAVEFPU(%rbx),%rdi
	call	npxsave			/* do it in a big C function */
	movq	%r12,%rdi		/* restore %rdi */
1:
#endif

	movq	%rdi,%rax		/* switch to this thread */
	pushq	$cpu_lwkt_restore
	movq	%rsp,TD_SP(%rbx)
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp

	/*
	 * %rax contains new thread, %rbx contains old thread.
	 */
	CHECKNZ((%rsp), %r9)
	ret

/*
 * cpu_lwkt_restore()	(current thread in %rax on entry)
 *
 *	Standard LWKT restore function.  This function is always called
 *	while in a critical section.
 *
 *	Warning: due to preemption the restore function can be used to
 *	'return' to the original thread.  Interrupt disablement must be
 *	protected through the switch so we cannot run splz here.
 */
ENTRY(cpu_lwkt_restore)
	/*
	 * NOTE: %rbx is the previous thread and %rax is the new thread.
	 *	 %rbx is retained throughout so we can return it.
	 *
	 *	 lwkt_switch[_return] is responsible for handling TDF_RUNNING.
	 */
	movq	%rbx,%rax
	popfq
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp
	ret

/*
 * bootstrap_idle()
 *
 *	Make AP become the idle loop.
 */
ENTRY(bootstrap_idle)
	movq	PCPU(curthread),%rax
	movq	%rax,%rbx
	movq	TD_SP(%rax),%rsp
	ret