/*
 * Copyright (c) 2003,2004,2008 The DragonFly Project.  All rights reserved.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 */

//#include "use_npx.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/segments.h>

#include <machine/pmap.h>
#include <machine/lock.h>

/*
 * Debug assertion: load 'expr' into scratch_reg and breakpoint (int $3)
 * if the value is zero.
 */
#define CHECKNZ(expr, scratch_reg) \
	movq expr, scratch_reg; testq scratch_reg, scratch_reg; jnz 7f; int $3; 7:

#include "assym.s"

#define MPLOCKED	lock ;		/* atomic read-modify-write prefix */

	.data

	.globl	panic
	.globl	lwkt_switch_return

#if defined(SWTCH_OPTIM_STATS)
	.globl	swtch_optim_stats, tlb_flush_count
swtch_optim_stats:	.long	0		/* number of _swtch_optims */
tlb_flush_count:	.long	0
#endif

	.text


/*
 * cpu_heavy_switch(struct thread *next_thread)
 *
 *	Switch from the current thread to a new thread.  This entry
 *	is normally called via the thread->td_switch function, and will
 *	only be called when the current thread is a heavy weight process.
 *
 *	Some instructions have been reordered to reduce pipeline stalls.
 *
 *	YYY disable interrupts once giant is removed.
 */
ENTRY(cpu_heavy_switch)
	/*
	 * Save RIP, RSP and callee-saved registers (RBX, RBP, R12-R15).
	 */
	movq	PCPU(curthread),%rcx
	/* On top of the stack is the return address. */
	movq	(%rsp),%rax			/* (reorder optimization) */
	movq	TD_PCB(%rcx),%rdx		/* RDX = PCB */
	movq	%rax,PCB_RIP(%rdx)		/* return PC may be modified */
	movq	%rbx,PCB_RBX(%rdx)
	movq	%rsp,PCB_RSP(%rdx)
	movq	%rbp,PCB_RBP(%rdx)
	movq	%r12,PCB_R12(%rdx)
	movq	%r13,PCB_R13(%rdx)
	movq	%r14,PCB_R14(%rdx)
	movq	%r15,PCB_R15(%rdx)

	/*
	 * Clear the cpu bit in the pmap active mask.  The restore
	 * function will set the bit in the pmap active mask.
	 *
	 * Special case: when switching between threads sharing the
	 * same vmspace if we avoid clearing the bit we do not have
	 * to reload %cr3 (if we clear the bit we could race page
	 * table ops done by other threads and would have to reload
	 * %cr3, because those ops will not know to IPI us).
	 */
	movq	%rcx,%rbx			/* RBX = oldthread */
	movq	TD_LWP(%rcx),%rcx		/* RCX = oldlwp */
	movq	TD_LWP(%rdi),%r13		/* R13 = newlwp */
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = oldvmspace */
	testq	%r13,%r13			/* might not be a heavy */
	jz	1f
	cmpq	LWP_VMSPACE(%r13),%rcx		/* same vmspace? */
	je	2f
1:
	movq	PCPU(other_cpus)+0, %rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+0(%rcx)
	movq	PCPU(other_cpus)+8, %rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+8(%rcx)
	movq	PCPU(other_cpus)+16, %rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+16(%rcx)
	movq	PCPU(other_cpus)+24, %rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+24(%rcx)
2:

	/*
	 * Push the LWKT switch restore function, which resumes a heavy
	 * weight process.  Note that the LWKT switcher is based on
	 * TD_SP, while the heavy weight process switcher is based on
	 * PCB_RSP.
	 * TD_SP is usually two ints pushed relative to
	 * PCB_RSP.  We push the flags for later restore by cpu_heavy_restore.
	 */
	pushfq
	movq	$cpu_heavy_restore, %rax
	pushq	%rax
	movq	%rsp,TD_SP(%rbx)

	/*
	 * Save debug regs if necessary
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	%dr7,%rax			/* yes, do the save */
	movq	%rax,PCB_DR7(%rdx)
	/* JG correct value? */
	andq	$0x0000fc00, %rax		/* disable all watchpoints */
	movq	%rax,%dr7
	movq	%dr6,%rax
	movq	%rax,PCB_DR6(%rdx)
	movq	%dr3,%rax
	movq	%rax,PCB_DR3(%rdx)
	movq	%dr2,%rax
	movq	%rax,PCB_DR2(%rdx)
	movq	%dr1,%rax
	movq	%rax,PCB_DR1(%rdx)
	movq	%dr0,%rax
	movq	%rax,PCB_DR0(%rdx)
1:

#if 1
	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 */
	cmpq	%rbx,PCPU(npxthread)
	jne	1f
	movq	%rdi,%r12		/* save %rdi.  %r12 is callee-saved */
	movq	TD_SAVEFPU(%rbx),%rdi
	call	npxsave			/* do it in a big C function */
	movq	%r12,%rdi		/* restore %rdi */
1:
#endif

	/*
	 * Switch to the next thread, which was passed as an argument
	 * to cpu_heavy_switch().  The argument is in %rdi.
	 * Set the current thread, load the stack pointer,
	 * and 'ret' into the switch-restore function.
	 *
	 * The switch restore function expects the new thread to be in %rax
	 * and the old one to be in %rbx.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	movq	%rdi,%rax		/* RAX = newtd, RBX = oldtd */
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	CHECKNZ((%rsp), %r9)
	ret

/*
 * cpu_exit_switch(struct thread *next)
 *
 *	The switch function is changed to this when a thread is going away
 *	for good.  We have to ensure that the MMU state is not cached, and
 *	we don't bother saving the existing thread state before switching.
 *
 *	At this point we are in a critical section and this cpu owns the
 *	thread's token, which serves as an interlock until the switchout is
 *	complete.
 */
ENTRY(cpu_exit_switch)
	/*
	 * Get us out of the vmspace
	 */
#if 0
	movq	KPML4phys,%rcx
	movq	%cr3,%rax
	cmpq	%rcx,%rax
	je	1f
	/* JG no increment of statistics counters? see cpu_heavy_restore */
	movq	%rcx,%cr3
1:
#endif
	movq	PCPU(curthread),%rbx

	/*
	 * If this is a process/lwp, deactivate the pmap after we've
	 * switched it out.
	 */
	movq	TD_LWP(%rbx),%rcx
	testq	%rcx,%rcx
	jz	2f
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = vmspace */
	movq	PCPU(other_cpus)+0, %rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+0(%rcx)
	movq	PCPU(other_cpus)+8, %rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+8(%rcx)
	movq	PCPU(other_cpus)+16, %rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+16(%rcx)
	movq	PCPU(other_cpus)+24, %rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+24(%rcx)
2:
	/*
	 * Switch to the next thread.  RET into the restore function, which
	 * expects the new thread in RAX and the old in RBX.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	movq	%rdi,%rax
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	CHECKNZ((%rsp), %r9)
	ret

/*
 * cpu_heavy_restore()	(current thread in %rax on entry, %rbx is old thread)
 *
 *	Restore the thread after an LWKT switch.  This entry is normally
 *	called via the LWKT switch restore function, which was pulled
 *	off the thread stack and jumped to.
 *
 *	This entry is only called if the thread was previously saved
 *	using cpu_heavy_switch() (the heavy weight process thread switcher),
 *	or when a new process is initially scheduled.
 *
 *	NOTE: The lwp may be in any state, not necessarily LSRUN, because
 *	a preemption switch may interrupt the process and then return via
 *	cpu_heavy_restore.
 *
 *	YYY theoretically we do not have to restore everything here, a lot
 *	of this junk can wait until we return to usermode.  But for now
 *	we restore everything.
 *
 *	YYY the PCB crap is really crap, it makes startup a bitch because
 *	we can't switch away.
 *
 *	YYY note: spl check is done in mi_switch when it splx()'s.
 */

ENTRY(cpu_heavy_restore)
	popfq
	movq	TD_PCB(%rax),%rdx		/* RDX = PCB */

#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif
	/*
	 * Tell the pmap that our cpu is using the VMSPACE now.  We cannot
	 * safely test/reload %cr3 until after we have set the bit in the
	 * pmap (remember, we do not hold the MP lock in the switch code).
	 */
	movq	TD_LWP(%rax),%rcx
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = vmspace */

	movq	PCPU(cpumask)+0, %rsi
	MPLOCKED orq	%rsi, VM_PMAP+PM_ACTIVE+0(%rcx)
	movq	PCPU(cpumask)+8, %rsi
	MPLOCKED orq	%rsi, VM_PMAP+PM_ACTIVE+8(%rcx)
	movq	PCPU(cpumask)+16, %rsi
	MPLOCKED orq	%rsi, VM_PMAP+PM_ACTIVE+16(%rcx)
	movq	PCPU(cpumask)+24, %rsi
	MPLOCKED orq	%rsi, VM_PMAP+PM_ACTIVE+24(%rcx)

	movl	VM_PMAP+PM_ACTIVE_LOCK(%rcx),%esi
	testl	$CPULOCK_EXCL,%esi
	jz	1f

	movq	%rax,%r12		/* save newthread ptr */
	movq	%rcx,%rdi		/* (found to be set) */
	call	pmap_interlock_wait	/* pmap_interlock_wait(%rdi:vm) */
	movq	%r12,%rax
	movq	TD_PCB(%rax),%rdx	/* RDX = PCB */
1:
	/*
	 * Restore the MMU address space.  If it is the same as the last
	 * thread we don't have to invalidate the tlb (i.e. reload cr3).
	 * YYY which naturally also means that the PM_ACTIVE bit had better
	 * already have been set before we set it above, check? YYY
	 */
#if 0
	movq	%cr3,%rsi
	movq	PCB_CR3(%rdx),%rcx
	cmpq	%rsi,%rcx
	je	4f
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
	movq	%rcx,%cr3
4:
#endif
	/*
	 * NOTE: %rbx is the previous thread and %rax is the new thread.
	 *	 %rbx is retained throughout so we can return it.
	 *
	 *	 lwkt_switch[_return] is responsible for handling TDF_RUNNING.
	 */

#if 0
	/*
	 * Deal with the PCB extension, restore the private tss
	 */
	movq	PCB_EXT(%rdx),%rdi	/* check for a PCB extension */
	movq	$1,%rcx			/* maybe mark use of a private tss */
	testq	%rdi,%rdi
#if JG
	jnz	2f
#endif

	/* JG
	 * Going back to the common_tss.  We may need to update TSS_ESP0
	 * which sets the top of the supervisor stack when entering from
	 * usermode.  The PCB is at the top of the stack but we need another
	 * 16 bytes to take vm86 into account.
	 */
	leaq	-16(%rdx),%rcx
	movq	%rcx, PCPU(common_tss) + TSS_RSP0
	movq	%rcx, PCPU(rsp0)

#if JG
	cmpl	$0,PCPU(private_tss)	/* don't have to reload if      */
	je	3f			/* already using the common TSS */

	/* JG? */
	subq	%rcx,%rcx		/* unmark use of private tss */

	/*
	 * Get the address of the common TSS descriptor for the ltr.
	 * There is no way to get the address of a segment-accessed variable
	 * so we store a self-referential pointer at the base of the per-cpu
	 * data area and add the appropriate offset.
	 */
	/* JG movl? */
	movq	$gd_common_tssd, %rdi
	/* JG name for "%gs:0"? */
	addq	%gs:0, %rdi

	/*
	 * Move the correct TSS descriptor into the GDT slot, then reload
	 * ltr.
	 */
2:
	/* JG */
	movl	%rcx,PCPU(private_tss)		/* mark/unmark private tss */
	movq	PCPU(tss_gdt), %rcx		/* entry in GDT */
	movq	0(%rdi), %rax
	movq	%rax, 0(%rcx)
	movl	$GPROC0_SEL*8, %esi		/* GSEL(entry, SEL_KPL) */
	ltr	%si
#endif

3:
#endif
#if 0
	/*
	 * Restore the user %gs and %fs
	 */
	movq	PCB_FSBASE(%rdx),%r9
	cmpq	PCPU(user_fs),%r9
	je	4f
	movq	%rdx,%r10
	movq	%r9,PCPU(user_fs)
	movl	$MSR_FSBASE,%ecx
	movl	PCB_FSBASE(%r10),%eax
	movl	PCB_FSBASE+4(%r10),%edx
	wrmsr
	movq	%r10,%rdx
4:
	movq	PCB_GSBASE(%rdx),%r9
	cmpq	PCPU(user_gs),%r9
	je	5f
	movq	%rdx,%r10
	movq	%r9,PCPU(user_gs)
	movl	$MSR_KGSBASE,%ecx	/* later swapgs moves it to GSBASE */
	movl	PCB_GSBASE(%r10),%eax
	movl	PCB_GSBASE+4(%r10),%edx
	wrmsr
	movq	%r10,%rdx
5:
#endif

	/*
	 * Restore general registers.  %rbx is restored later.
	 */
	movq	PCB_RSP(%rdx), %rsp
	movq	PCB_RBP(%rdx), %rbp
	movq	PCB_R12(%rdx), %r12
	movq	PCB_R13(%rdx), %r13
	movq	PCB_R14(%rdx), %r14
	movq	PCB_R15(%rdx), %r15
	movq	PCB_RIP(%rdx), %rax
	movq	%rax, (%rsp)

#if 0
	/*
	 * Restore the user LDT if we have one
	 */
	cmpl	$0, PCB_USERLDT(%edx)
	jnz	1f
	movl	_default_ldt,%eax
	cmpl	PCPU(currentldt),%eax
	je	2f
	lldt	_default_ldt
	movl	%eax,PCPU(currentldt)
	jmp	2f
1:	pushl	%edx
	call	set_user_ldt
	popl	%edx
2:
#endif
#if 0
	/*
	 * Restore the user TLS if we have one
	 */
	pushl	%edx
	call	set_user_TLS
	popl	%edx
#endif

	/*
	 * Restore the DEBUG register state if necessary.
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	PCB_DR6(%rdx),%rax		/* yes, do the restore */
	movq	%rax,%dr6
	movq	PCB_DR3(%rdx),%rax
	movq	%rax,%dr3
	movq	PCB_DR2(%rdx),%rax
	movq	%rax,%dr2
	movq	PCB_DR1(%rdx),%rax
	movq	%rax,%dr1
	movq	PCB_DR0(%rdx),%rax
	movq	%rax,%dr0
	movq	%dr7,%rax		/* load dr7 so as not to disturb */
	/* JG correct value? */
	andq	$0x0000fc00,%rax	/* reserved bits */
	/* JG we've got more registers on x86_64 */
	movq	PCB_DR7(%rdx),%rcx
	/* JG correct value? */
	andq	$~0x0000fc00,%rcx
	orq	%rcx,%rax
	movq	%rax,%dr7
1:
	movq	%rbx,%rax
	movq	PCB_RBX(%rdx),%rbx

	CHECKNZ((%rsp), %r9)
	ret

/*
 * savectx(struct pcb *pcb)
 *
 *	Update pcb, saving current processor state.
 */
ENTRY(savectx)
	/* fetch PCB */
	/* JG use %rdi instead of %rcx everywhere? */
	movq	%rdi,%rcx

	/* caller's return address - child won't execute this routine */
	movq	(%rsp),%rax
	movq	%rax,PCB_RIP(%rcx)
	movq	%rbx,PCB_RBX(%rcx)
	movq	%rsp,PCB_RSP(%rcx)
	movq	%rbp,PCB_RBP(%rcx)
	movq	%r12,PCB_R12(%rcx)
	movq	%r13,PCB_R13(%rcx)
	movq	%r14,PCB_R14(%rcx)
	movq	%r15,PCB_R15(%rcx)

#if 1
	/*
	 * If npxthread == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxthread != NULL, then we have to save the npx h/w state to
	 * npxthread's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	movq	PCPU(npxthread),%rax
	testq	%rax,%rax
	jz	1f

	pushq	%rcx			/* target pcb */
	movq	TD_SAVEFPU(%rax),%rax	/* originating savefpu area */
	pushq	%rax

	movq	%rax,%rdi
	call	npxsave

	popq	%rax
	popq	%rcx

	movq	$PCB_SAVEFPU_SIZE,%rdx
	leaq	PCB_SAVEFPU(%rcx),%rcx
	movq	%rcx,%rsi
	movq	%rax,%rdi
	call	bcopy
#endif

1:
	CHECKNZ((%rsp), %r9)
	ret

/*
 * cpu_idle_restore()	(current thread in %rax on entry) (one-time execution)
 *			(old thread is %rbx on entry)
 *
 *	Don't bother setting up any regs other than %rbp so backtraces
 *	don't die.  This restore function is used to bootstrap into the
 *	cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 *	switching.
 *
 *	Clear TDF_RUNNING in old thread only after we've cleaned up %cr3.
 *	This only occurs during system boot so no special handling is
 *	required for migration.
 *
 *	If we are an AP we have to call ap_init() before jumping to
 *	cpu_idle().  ap_init() will synchronize with the BP and finish
 *	setting up various ncpu-dependent globaldata fields.  This may
 *	happen on UP as well as SMP if we happen to be simulating multiple
 *	cpus.
 */
ENTRY(cpu_idle_restore)
	/* cli */
	/* JG xor? */
	movl	$0,%ebp
	/* JG push RBP? */
	pushq	$0
	cmpl	$0,PCPU(cpuid)
	je	1f
	andl	$~TDF_RUNNING,TD_FLAGS(%rbx)
	orl	$TDF_RUNNING,TD_FLAGS(%rax)	/* manual, no switch_return */
	call	ap_init
	/* sti */
	jmp	cpu_idle

	/*
	 * cpu 0's idle thread entry for the first time must use normal
	 * lwkt_switch_return() semantics or a pending cpu migration on
	 * thread0 will deadlock.
	 */
1:
	pushq	%rax
	movq	%rbx,%rdi
	call	lwkt_switch_return
	popq	%rax
	jmp	cpu_idle

/*
 * cpu_kthread_restore()	(current thread is %rax on entry) (one-time execution)
 *				(old thread is %rbx on entry)
 *
 *	Don't bother setting up any regs other than %rbp so backtraces
 *	don't die.  This restore function is used to bootstrap into an
 *	LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 *	after this.
 *
 *	Because this switch target does not 'return' to lwkt_switch()
 *	we have to call lwkt_switch_return(otd) to clean up otd.
 *	otd is in %rbx.
 *
 *	Since all of our context is on the stack we are reentrant and
 *	we can release our critical section and enable interrupts early.
	 */
ENTRY(cpu_kthread_restore)
	/*sti*/
	movq	TD_PCB(%rax),%r13
	movq	$0,%rbp

	/*
	 * rax and rbx come from the switchout code.  Call
	 * lwkt_switch_return(otd).
	 *
	 * NOTE: unlike i386, %rsi and %rdi are not call-saved regs.
	 */
	pushq	%rax
	movq	%rbx,%rdi
	call	lwkt_switch_return
	popq	%rax
	decl	TD_CRITCOUNT(%rax)
	movq	PCB_R12(%r13),%rdi	/* argument to RBX function */
	movq	PCB_RBX(%r13),%rax	/* thread function */
	/* note: top of stack return address inherited by function */
	CHECKNZ(%rax, %r9)
	jmp	*%rax

/*
 * cpu_lwkt_switch(struct thread *)
 *
 *	Standard LWKT switching function.  Only non-scratch registers are
 *	saved and we don't bother with the MMU state or anything else.
 *
 *	This function is always called while in a critical section.
 *
 *	There is a one-instruction window where curthread is the new
 *	thread but %rsp still points to the old thread's stack, but
 *	we are protected by a critical section so it is ok.
 *
 *	YYY BGL, SPL
 */
ENTRY(cpu_lwkt_switch)
	pushq	%rbp	/* JG note: GDB hacked to locate ebp relative to td_sp */
	/* JG we've got more registers on x86_64 */
	pushq	%rbx
	movq	PCPU(curthread),%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15
	pushfq

#if 1
	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 *
	 * We have to deal with the FP state for LWKT threads in case they
	 * happen to get preempted or block while doing an optimized
	 * bzero/bcopy/memcpy.
	 */
	cmpq	%rbx,PCPU(npxthread)
	jne	1f
	movq	%rdi,%r12		/* save %rdi.  %r12 is callee-saved */
	movq	TD_SAVEFPU(%rbx),%rdi
	call	npxsave			/* do it in a big C function */
	movq	%r12,%rdi		/* restore %rdi */
1:
#endif

	movq	%rdi,%rax		/* switch to this thread */
	pushq	$cpu_lwkt_restore
	movq	%rsp,TD_SP(%rbx)
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp

	/*
	 * %rax contains new thread, %rbx contains old thread.
	 */
	CHECKNZ((%rsp), %r9)
	ret

/*
 * cpu_lwkt_restore()	(current thread in %rax on entry)
 *
 *	Standard LWKT restore function.  This function is always called
 *	while in a critical section.
 *
 *	Warning: due to preemption the restore function can be used to
 *	'return' to the original thread.  Interrupt disablement must be
 *	protected through the switch so we cannot run splz here.
 */
ENTRY(cpu_lwkt_restore)
	/*
	 * NOTE: %rbx is the previous thread and %rax is the new thread.
	 *	 %rbx is retained throughout so we can return it.
	 *
	 *	 lwkt_switch[_return] is responsible for handling TDF_RUNNING.
	 */
	movq	%rbx,%rax
	popfq
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp
	ret

/*
 * bootstrap_idle()
 *
 *	Make this AP enter the idle loop.
 */
ENTRY(bootstrap_idle)
	movq	PCPU(curthread),%rax
	movq	%rax,%rbx
	movq	TD_SP(%rax),%rsp
	ret