1/* 2 * Copyright (c) 2003,2004,2008 The DragonFly Project. All rights reserved. 3 * Copyright (c) 2008 Jordan Gordeev. 4 * 5 * This code is derived from software contributed to The DragonFly Project 6 * by Matthew Dillon <dillon@backplane.com> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 * 35 * Copyright (c) 1990 The Regents of the University of California. 36 * All rights reserved. 37 * 38 * This code is derived from software contributed to Berkeley by 39 * William Jolitz. 40 * 41 * Redistribution and use in source and binary forms, with or without 42 * modification, are permitted provided that the following conditions 43 * are met: 44 * 1. Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * 2. Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in the 48 * documentation and/or other materials provided with the distribution. 49 * 3. Neither the name of the University nor the names of its contributors 50 * may be used to endorse or promote products derived from this software 51 * without specific prior written permission. 52 * 53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 63 * SUCH DAMAGE. 64 * 65 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $ 66 */ 67 68//#include "use_npx.h" 69 70#include <sys/rtprio.h> 71 72#include <machine/asmacros.h> 73#include <machine/segments.h> 74 75#include <machine/pmap.h> 76#if 0 /* JG */ 77#include <machine_base/apic/apicreg.h> 78#endif 79#include <machine/lock.h> 80 81#include "assym.s" 82 83#define MPLOCKED lock ; 84 85 .data 86 87 .globl panic 88 .globl lwkt_switch_return 89 90#if defined(SWTCH_OPTIM_STATS) 91 .globl swtch_optim_stats, tlb_flush_count 92swtch_optim_stats: .long 0 /* number of _swtch_optims */ 93tlb_flush_count: .long 0 94#endif 95 96 .text 97 98 99/* 100 * cpu_heavy_switch(struct thread *next_thread) 101 * 102 * Switch from the current thread to a new thread. This entry 103 * is normally called via the thread->td_switch function, and will 104 * only be called when the current thread is a heavy weight process. 105 * 106 * Some instructions have been reordered to reduce pipeline stalls. 107 * 108 * YYY disable interrupts once giant is removed. 109 */ 110ENTRY(cpu_heavy_switch) 111 /* 112 * Save RIP, RSP and callee-saved registers (RBX, RBP, R12-R15). 113 */ 114 movq PCPU(curthread),%rcx 115 /* On top of the stack is the return adress. */ 116 movq (%rsp),%rax /* (reorder optimization) */ 117 movq TD_PCB(%rcx),%rdx /* RDX = PCB */ 118 movq %rax,PCB_RIP(%rdx) /* return PC may be modified */ 119 movq %rbx,PCB_RBX(%rdx) 120 movq %rsp,PCB_RSP(%rdx) 121 movq %rbp,PCB_RBP(%rdx) 122 movq %r12,PCB_R12(%rdx) 123 movq %r13,PCB_R13(%rdx) 124 movq %r14,PCB_R14(%rdx) 125 movq %r15,PCB_R15(%rdx) 126 127 /* 128 * Clear the cpu bit in the pmap active mask. The restore 129 * function will set the bit in the pmap active mask. 130 * 131 * Special case: when switching between threads sharing the 132 * same vmspace if we avoid clearing the bit we do not have 133 * to reload %cr3 (if we clear the bit we could race page 134 * table ops done by other threads and would have to reload 135 * %cr3, because those ops will not know to IPI us). 136 */ 137 movq %rcx,%rbx /* RBX = oldthread */ 138 movq TD_LWP(%rcx),%rcx /* RCX = oldlwp */ 139 movq TD_LWP(%rdi),%r13 /* R13 = newlwp */ 140 movq LWP_VMSPACE(%rcx), %rcx /* RCX = oldvmspace */ 141 testq %r13,%r13 /* might not be a heavy */ 142 jz 1f 143 cmpq LWP_VMSPACE(%r13),%rcx /* same vmspace? */ 144 je 2f 145#if CPUMASK_ELEMENTS != 4 146#error "assembly incompatible with cpumask_t" 147#endif 1481: 149 movq PCPU(other_cpus)+0,%rax 150 MPLOCKED andq %rax, VM_PMAP+PM_ACTIVE+0(%rcx) 151 movq PCPU(other_cpus)+8,%rax 152 MPLOCKED andq %rax, VM_PMAP+PM_ACTIVE+8(%rcx) 153 movq PCPU(other_cpus)+16,%rax 154 MPLOCKED andq %rax, VM_PMAP+PM_ACTIVE+16(%rcx) 155 movq PCPU(other_cpus)+24,%rax 156 MPLOCKED andq %rax, VM_PMAP+PM_ACTIVE+24(%rcx) 1572: 158 159 /* 160 * Push the LWKT switch restore function, which resumes a heavy 161 * weight process. Note that the LWKT switcher is based on 162 * TD_SP, while the heavy weight process switcher is based on 163 * PCB_RSP. TD_SP is usually two ints pushed relative to 164 * PCB_RSP. We push the flags for later restore by cpu_heavy_restore. 165 */ 166 pushfq 167 cli 168 movq $cpu_heavy_restore, %rax 169 pushq %rax 170 movq %rsp,TD_SP(%rbx) 171 172 /* 173 * Save debug regs if necessary 174 */ 175 movq PCB_FLAGS(%rdx),%rax 176 andq $PCB_DBREGS,%rax 177 jz 1f /* no, skip over */ 178 movq %dr7,%rax /* yes, do the save */ 179 movq %rax,PCB_DR7(%rdx) 180 /* JG correct value? */ 181 andq $0x0000fc00, %rax /* disable all watchpoints */ 182 movq %rax,%dr7 183 movq %dr6,%rax 184 movq %rax,PCB_DR6(%rdx) 185 movq %dr3,%rax 186 movq %rax,PCB_DR3(%rdx) 187 movq %dr2,%rax 188 movq %rax,PCB_DR2(%rdx) 189 movq %dr1,%rax 190 movq %rax,PCB_DR1(%rdx) 191 movq %dr0,%rax 192 movq %rax,PCB_DR0(%rdx) 1931: 194 195#if 1 196 /* 197 * Save the FP state if we have used the FP. Note that calling 198 * npxsave will NULL out PCPU(npxthread). 199 */ 200 cmpq %rbx,PCPU(npxthread) 201 jne 1f 202 movq %rdi,%r12 /* save %rdi. %r12 is callee-saved */ 203 movq TD_SAVEFPU(%rbx),%rdi 204 call npxsave /* do it in a big C function */ 205 movq %r12,%rdi /* restore %rdi */ 2061: 207#endif 208 209 /* 210 * Switch to the next thread, which was passed as an argument 211 * to cpu_heavy_switch(). The argument is in %rdi. 212 * Set the current thread, load the stack pointer, 213 * and 'ret' into the switch-restore function. 214 * 215 * The switch restore function expects the new thread to be in %rax 216 * and the old one to be in %rbx. 217 * 218 * There is a one-instruction window where curthread is the new 219 * thread but %rsp still points to the old thread's stack, but 220 * we are protected by a critical section so it is ok. 221 */ 222 movq %rdi,%rax /* RAX = newtd, RBX = oldtd */ 223 movq %rax,PCPU(curthread) 224 movq TD_SP(%rax),%rsp 225 ret 226 227/* 228 * cpu_exit_switch(struct thread *next) 229 * 230 * The switch function is changed to this when a thread is going away 231 * for good. We have to ensure that the MMU state is not cached, and 232 * we don't bother saving the existing thread state before switching. 233 * 234 * At this point we are in a critical section and this cpu owns the 235 * thread's token, which serves as an interlock until the switchout is 236 * complete. 237 */ 238ENTRY(cpu_exit_switch) 239 /* 240 * Get us out of the vmspace 241 */ 242 movq KPML4phys,%rcx 243 movq %cr3,%rax 244#if 1 245 cmpq %rcx,%rax 246 je 1f 247#endif 248 /* JG no increment of statistics counters? see cpu_heavy_restore */ 249 movq %rcx,%cr3 2501: 251 movq PCPU(curthread),%rbx 252 253 /* 254 * If this is a process/lwp, deactivate the pmap after we've 255 * switched it out. 256 */ 257 movq TD_LWP(%rbx),%rcx 258 testq %rcx,%rcx 259 jz 2f 260 movq LWP_VMSPACE(%rcx), %rcx /* RCX = vmspace */ 261 movq PCPU(other_cpus)+0,%rax 262 MPLOCKED andq %rax, VM_PMAP+PM_ACTIVE+0(%rcx) 263 movq PCPU(other_cpus)+8,%rax 264 MPLOCKED andq %rax, VM_PMAP+PM_ACTIVE+8(%rcx) 265 movq PCPU(other_cpus)+16,%rax 266 MPLOCKED andq %rax, VM_PMAP+PM_ACTIVE+16(%rcx) 267 movq PCPU(other_cpus)+24,%rax 268 MPLOCKED andq %rax, VM_PMAP+PM_ACTIVE+24(%rcx) 2692: 270 /* 271 * Switch to the next thread. RET into the restore function, which 272 * expects the new thread in RAX and the old in RBX. 273 * 274 * There is a one-instruction window where curthread is the new 275 * thread but %rsp still points to the old thread's stack, but 276 * we are protected by a critical section so it is ok. 277 */ 278 cli 279 movq %rdi,%rax 280 movq %rax,PCPU(curthread) 281 movq TD_SP(%rax),%rsp 282 ret 283 284/* 285 * cpu_heavy_restore() (current thread in %rax on entry, old thread in %rbx) 286 * 287 * Restore the thread after an LWKT switch. This entry is normally 288 * called via the LWKT switch restore function, which was pulled 289 * off the thread stack and jumped to. 290 * 291 * This entry is only called if the thread was previously saved 292 * using cpu_heavy_switch() (the heavy weight process thread switcher), 293 * or when a new process is initially scheduled. 294 * 295 * NOTE: The lwp may be in any state, not necessarily LSRUN, because 296 * a preemption switch may interrupt the process and then return via 297 * cpu_heavy_restore. 298 * 299 * YYY theoretically we do not have to restore everything here, a lot 300 * of this junk can wait until we return to usermode. But for now 301 * we restore everything. 302 * 303 * YYY the PCB crap is really crap, it makes startup a bitch because 304 * we can't switch away. 305 * 306 * YYY note: spl check is done in mi_switch when it splx()'s. 307 */ 308 309ENTRY(cpu_heavy_restore) 310 movq TD_PCB(%rax),%rdx /* RDX = PCB */ 311 movq %rdx, PCPU(common_tss) + TSS_RSP0 312 popfq 313 314#if defined(SWTCH_OPTIM_STATS) 315 incl _swtch_optim_stats 316#endif 317 /* 318 * Tell the pmap that our cpu is using the VMSPACE now. We cannot 319 * safely test/reload %cr3 until after we have set the bit in the 320 * pmap. 321 * 322 * We must do an interlocked test of the CPULOCK_EXCL at the same 323 * time. If found to be set we will have to wait for it to clear 324 * and then do a forced reload of %cr3 (even if the value matches). 325 * 326 * XXX When switching between two LWPs sharing the same vmspace 327 * the cpu_heavy_switch() code currently avoids clearing the 328 * cpu bit in PM_ACTIVE. So if the bit is already set we can 329 * avoid checking for the interlock via CPULOCK_EXCL. We currently 330 * do not perform this optimization. 331 */ 332 movq TD_LWP(%rax),%rcx 333 movq LWP_VMSPACE(%rcx),%rcx /* RCX = vmspace */ 334 335#if CPUMASK_ELEMENTS != 4 336#error "assembly incompatible with cpumask_t" 337#endif 338 movq PCPU(cpumask)+0,%rsi /* new contents */ 339 MPLOCKED orq %rsi, VM_PMAP+PM_ACTIVE+0(%rcx) 340 movq PCPU(cpumask)+8,%rsi 341 MPLOCKED orq %rsi, VM_PMAP+PM_ACTIVE+8(%rcx) 342 movq PCPU(cpumask)+16,%rsi 343 MPLOCKED orq %rsi, VM_PMAP+PM_ACTIVE+16(%rcx) 344 movq PCPU(cpumask)+24,%rsi 345 MPLOCKED orq %rsi, VM_PMAP+PM_ACTIVE+24(%rcx) 346 347 movl VM_PMAP+PM_ACTIVE_LOCK(%rcx),%esi 348 testl $CPULOCK_EXCL,%esi 349 jz 1f 350 351 movq %rax,%r12 /* save newthread ptr */ 352 movq %rcx,%rdi /* (found to be set) */ 353 call pmap_interlock_wait /* pmap_interlock_wait(%rdi:vm) */ 354 movq %r12,%rax 355 356 /* 357 * Need unconditional load cr3 358 */ 359 movq TD_PCB(%rax),%rdx /* RDX = PCB */ 360 movq PCB_CR3(%rdx),%rcx /* RCX = desired CR3 */ 361 jmp 2f /* unconditional reload */ 3621: 363 /* 364 * Restore the MMU address space. If it is the same as the last 365 * thread we don't have to invalidate the tlb (i.e. reload cr3). 366 * YYY which naturally also means that the PM_ACTIVE bit had better 367 * already have been set before we set it above, check? YYY 368 */ 369 movq TD_PCB(%rax),%rdx /* RDX = PCB */ 370 movq %cr3,%rsi /* RSI = current CR3 */ 371 movq PCB_CR3(%rdx),%rcx /* RCX = desired CR3 */ 372 cmpq %rsi,%rcx 373 je 4f 3742: 375#if defined(SWTCH_OPTIM_STATS) 376 decl _swtch_optim_stats 377 incl _tlb_flush_count 378#endif 379 movq %rcx,%cr3 3804: 381 382 /* 383 * NOTE: %rbx is the previous thread and %rax is the new thread. 384 * %rbx is retained throughout so we can return it. 385 * 386 * lwkt_switch[_return] is responsible for handling TDF_RUNNING. 387 */ 388 389 /* 390 * Deal with the PCB extension, restore the private tss 391 */ 392 movq PCB_EXT(%rdx),%rdi /* check for a PCB extension */ 393 movq $1,%rcx /* maybe mark use of a private tss */ 394 testq %rdi,%rdi 395#if 0 /* JG */ 396 jnz 2f 397#endif 398 399 /* 400 * Going back to the common_tss. We may need to update TSS_RSP0 401 * which sets the top of the supervisor stack when entering from 402 * usermode. The PCB is at the top of the stack but we need another 403 * 16 bytes to take vm86 into account. 404 */ 405 movq %rdx,%rcx 406 /*leaq -TF_SIZE(%rdx),%rcx*/ 407 movq %rcx, PCPU(common_tss) + TSS_RSP0 408 409#if 0 /* JG */ 410 cmpl $0,PCPU(private_tss) /* don't have to reload if */ 411 je 3f /* already using the common TSS */ 412 413 /* JG? */ 414 subq %rcx,%rcx /* unmark use of private tss */ 415 416 /* 417 * Get the address of the common TSS descriptor for the ltr. 418 * There is no way to get the address of a segment-accessed variable 419 * so we store a self-referential pointer at the base of the per-cpu 420 * data area and add the appropriate offset. 421 */ 422 /* JG movl? */ 423 movq $gd_common_tssd, %rdi 424 /* JG name for "%gs:0"? */ 425 addq %gs:0, %rdi 426 427 /* 428 * Move the correct TSS descriptor into the GDT slot, then reload 429 * ltr. 430 */ 4312: 432 /* JG */ 433 movl %rcx,PCPU(private_tss) /* mark/unmark private tss */ 434 movq PCPU(tss_gdt), %rbx /* entry in GDT */ 435 movq 0(%rdi), %rax 436 movq %rax, 0(%rbx) 437 movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */ 438 ltr %si 439#endif 440 4413: 442 /* 443 * Restore the user %gs and %fs 444 */ 445 movq PCB_FSBASE(%rdx),%r9 446 cmpq PCPU(user_fs),%r9 447 je 4f 448 movq %rdx,%r10 449 movq %r9,PCPU(user_fs) 450 movl $MSR_FSBASE,%ecx 451 movl PCB_FSBASE(%r10),%eax 452 movl PCB_FSBASE+4(%r10),%edx 453 wrmsr 454 movq %r10,%rdx 4554: 456 movq PCB_GSBASE(%rdx),%r9 457 cmpq PCPU(user_gs),%r9 458 je 5f 459 movq %rdx,%r10 460 movq %r9,PCPU(user_gs) 461 movl $MSR_KGSBASE,%ecx /* later swapgs moves it to GSBASE */ 462 movl PCB_GSBASE(%r10),%eax 463 movl PCB_GSBASE+4(%r10),%edx 464 wrmsr 465 movq %r10,%rdx 4665: 467 468 /* 469 * Restore general registers. %rbx is restored later. 470 */ 471 movq PCB_RSP(%rdx), %rsp 472 movq PCB_RBP(%rdx), %rbp 473 movq PCB_R12(%rdx), %r12 474 movq PCB_R13(%rdx), %r13 475 movq PCB_R14(%rdx), %r14 476 movq PCB_R15(%rdx), %r15 477 movq PCB_RIP(%rdx), %rax 478 movq %rax, (%rsp) 479 movw $KDSEL,%ax 480 movw %ax,%es 481 482#if 0 /* JG */ 483 /* 484 * Restore the user LDT if we have one 485 */ 486 cmpl $0, PCB_USERLDT(%edx) 487 jnz 1f 488 movl _default_ldt,%eax 489 cmpl PCPU(currentldt),%eax 490 je 2f 491 lldt _default_ldt 492 movl %eax,PCPU(currentldt) 493 jmp 2f 4941: pushl %edx 495 call set_user_ldt 496 popl %edx 4972: 498#endif 499#if 0 /* JG */ 500 /* 501 * Restore the user TLS if we have one 502 */ 503 pushl %edx 504 call set_user_TLS 505 popl %edx 506#endif 507 508 /* 509 * Restore the DEBUG register state if necessary. 510 */ 511 movq PCB_FLAGS(%rdx),%rax 512 andq $PCB_DBREGS,%rax 513 jz 1f /* no, skip over */ 514 movq PCB_DR6(%rdx),%rax /* yes, do the restore */ 515 movq %rax,%dr6 516 movq PCB_DR3(%rdx),%rax 517 movq %rax,%dr3 518 movq PCB_DR2(%rdx),%rax 519 movq %rax,%dr2 520 movq PCB_DR1(%rdx),%rax 521 movq %rax,%dr1 522 movq PCB_DR0(%rdx),%rax 523 movq %rax,%dr0 524 movq %dr7,%rax /* load dr7 so as not to disturb */ 525 /* JG correct value? */ 526 andq $0x0000fc00,%rax /* reserved bits */ 527 /* JG we've got more registers on x86_64 */ 528 movq PCB_DR7(%rdx),%rcx 529 /* JG correct value? */ 530 andq $~0x0000fc00,%rcx 531 orq %rcx,%rax 532 movq %rax,%dr7 533 534 /* 535 * Clear the QUICKRET flag when restoring a user process context 536 * so we don't try to do a quick syscall return. 537 */ 5381: 539 andl $~RQF_QUICKRET,PCPU(reqflags) 540 movq %rbx,%rax 541 movq PCB_RBX(%rdx),%rbx 542 ret 543 544/* 545 * savectx(struct pcb *pcb) 546 * 547 * Update pcb, saving current processor state. 548 */ 549ENTRY(savectx) 550 /* fetch PCB */ 551 /* JG use %rdi instead of %rcx everywhere? */ 552 movq %rdi,%rcx 553 554 /* caller's return address - child won't execute this routine */ 555 movq (%rsp),%rax 556 movq %rax,PCB_RIP(%rcx) 557 558 movq %cr3,%rax 559 movq %rax,PCB_CR3(%rcx) 560 561 movq %rbx,PCB_RBX(%rcx) 562 movq %rsp,PCB_RSP(%rcx) 563 movq %rbp,PCB_RBP(%rcx) 564 movq %r12,PCB_R12(%rcx) 565 movq %r13,PCB_R13(%rcx) 566 movq %r14,PCB_R14(%rcx) 567 movq %r15,PCB_R15(%rcx) 568 569#if 1 570 /* 571 * If npxthread == NULL, then the npx h/w state is irrelevant and the 572 * state had better already be in the pcb. This is true for forks 573 * but not for dumps (the old book-keeping with FP flags in the pcb 574 * always lost for dumps because the dump pcb has 0 flags). 575 * 576 * If npxthread != NULL, then we have to save the npx h/w state to 577 * npxthread's pcb and copy it to the requested pcb, or save to the 578 * requested pcb and reload. Copying is easier because we would 579 * have to handle h/w bugs for reloading. We used to lose the 580 * parent's npx state for forks by forgetting to reload. 581 */ 582 movq PCPU(npxthread),%rax 583 testq %rax,%rax 584 jz 1f 585 586 pushq %rcx /* target pcb */ 587 movq TD_SAVEFPU(%rax),%rax /* originating savefpu area */ 588 pushq %rax 589 590 movq %rax,%rdi 591 call npxsave 592 593 popq %rax 594 popq %rcx 595 596 movq $PCB_SAVEFPU_SIZE,%rdx 597 leaq PCB_SAVEFPU(%rcx),%rcx 598 movq %rcx,%rsi 599 movq %rax,%rdi 600 call bcopy 601#endif 602 6031: 604 ret 605 606/* 607 * cpu_idle_restore() (current thread in %rax on entry) (one-time execution) 608 * 609 * Don't bother setting up any regs other than %rbp so backtraces 610 * don't die. This restore function is used to bootstrap into the 611 * cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for 612 * switching. 613 * 614 * Clear TDF_RUNNING in old thread only after we've cleaned up %cr3. 615 * This only occurs during system boot so no special handling is 616 * required for migration. 617 * 618 * If we are an AP we have to call ap_init() before jumping to 619 * cpu_idle(). ap_init() will synchronize with the BP and finish 620 * setting up various ncpu-dependant globaldata fields. This may 621 * happen on UP as well as SMP if we happen to be simulating multiple 622 * cpus. 623 */ 624ENTRY(cpu_idle_restore) 625 /* cli */ 626 movq KPML4phys,%rcx 627 /* JG xor? */ 628 movq $0,%rbp 629 /* JG push RBP? */ 630 pushq $0 631 movq %rcx,%cr3 632 cmpl $0,PCPU(cpuid) 633 je 1f 634 andl $~TDF_RUNNING,TD_FLAGS(%rbx) 635 orl $TDF_RUNNING,TD_FLAGS(%rax) /* manual, no switch_return */ 636 call ap_init 637 /* 638 * ap_init can decide to enable interrupts early, but otherwise, or if 639 * we are UP, do it here. 640 */ 641 sti 642 jmp cpu_idle 643 644 /* 645 * cpu 0's idle thread entry for the first time must use normal 646 * lwkt_switch_return() semantics or a pending cpu migration on 647 * thread0 will deadlock. 648 */ 6491: 650 sti 651 pushq %rax 652 movq %rbx,%rdi 653 call lwkt_switch_return 654 popq %rax 655 jmp cpu_idle 656 657/* 658 * cpu_kthread_restore() (current thread is %rax on entry, previous is %rbx) 659 * (one-time execution) 660 * 661 * Don't bother setting up any regs other then %rbp so backtraces 662 * don't die. This restore function is used to bootstrap into an 663 * LWKT based kernel thread only. cpu_lwkt_switch() will be used 664 * after this. 665 * 666 * Because this switch target does not 'return' to lwkt_switch() 667 * we have to call lwkt_switch_return(otd) to clean up otd. 668 * otd is in %ebx. 669 * 670 * Since all of our context is on the stack we are reentrant and 671 * we can release our critical section and enable interrupts early. 672 */ 673ENTRY(cpu_kthread_restore) 674 sti 675 movq KPML4phys,%rcx 676 movq TD_PCB(%rax),%r13 677 xorq %rbp,%rbp 678 movq %rcx,%cr3 679 680 /* 681 * rax and rbx come from the switchout code. Call 682 * lwkt_switch_return(otd). 683 * 684 * NOTE: unlike i386, %rsi and %rdi are not call-saved regs. 685 */ 686 pushq %rax 687 movq %rbx,%rdi 688 call lwkt_switch_return 689 popq %rax 690 decl TD_CRITCOUNT(%rax) 691 movq PCB_R12(%r13),%rdi /* argument to RBX function */ 692 movq PCB_RBX(%r13),%rax /* thread function */ 693 /* note: top of stack return address inherited by function */ 694 jmp *%rax 695 696/* 697 * cpu_lwkt_switch(struct thread *) 698 * 699 * Standard LWKT switching function. Only non-scratch registers are 700 * saved and we don't bother with the MMU state or anything else. 701 * 702 * This function is always called while in a critical section. 703 * 704 * There is a one-instruction window where curthread is the new 705 * thread but %rsp still points to the old thread's stack, but 706 * we are protected by a critical section so it is ok. 707 */ 708ENTRY(cpu_lwkt_switch) 709 pushq %rbp /* JG note: GDB hacked to locate ebp rel to td_sp */ 710 pushq %rbx 711 movq PCPU(curthread),%rbx /* becomes old thread in restore */ 712 pushq %r12 713 pushq %r13 714 pushq %r14 715 pushq %r15 716 pushfq 717 cli 718 719#if 1 720 /* 721 * Save the FP state if we have used the FP. Note that calling 722 * npxsave will NULL out PCPU(npxthread). 723 * 724 * We have to deal with the FP state for LWKT threads in case they 725 * happen to get preempted or block while doing an optimized 726 * bzero/bcopy/memcpy. 727 */ 728 cmpq %rbx,PCPU(npxthread) 729 jne 1f 730 movq %rdi,%r12 /* save %rdi. %r12 is callee-saved */ 731 movq TD_SAVEFPU(%rbx),%rdi 732 call npxsave /* do it in a big C function */ 733 movq %r12,%rdi /* restore %rdi */ 7341: 735#endif 736 737 movq %rdi,%rax /* switch to this thread */ 738 pushq $cpu_lwkt_restore 739 movq %rsp,TD_SP(%rbx) 740 /* 741 * %rax contains new thread, %rbx contains old thread. 742 */ 743 movq %rax,PCPU(curthread) 744 movq TD_SP(%rax),%rsp 745 ret 746 747/* 748 * cpu_lwkt_restore() (current thread in %rax on entry) 749 * 750 * Standard LWKT restore function. This function is always called 751 * while in a critical section. 752 * 753 * Warning: due to preemption the restore function can be used to 754 * 'return' to the original thread. Interrupt disablement must be 755 * protected through the switch so we cannot run splz here. 756 * 757 * YYY we theoretically do not need to load KPML4phys into cr3, but if 758 * so we need a way to detect when the PTD we are using is being 759 * deleted due to a process exiting. 760 */ 761ENTRY(cpu_lwkt_restore) 762 movq KPML4phys,%rcx /* YYY borrow but beware desched/cpuchg/exit */ 763 movq %cr3,%rdx 764#if 1 765 cmpq %rcx,%rdx 766 je 1f 767#endif 768 movq %rcx,%cr3 7691: 770 /* 771 * Safety, clear RSP0 in the tss so it isn't pointing at the 772 * previous thread's kstack (if a heavy weight user thread). 773 * RSP0 should only be used in ring 3 transitions and kernel 774 * threads run in ring 0 so there should be none. 775 */ 776 xorq %rdx,%rdx 777 movq %rdx, PCPU(common_tss) + TSS_RSP0 778 779 /* 780 * NOTE: %rbx is the previous thread and %rax is the new thread. 781 * %rbx is retained throughout so we can return it. 782 * 783 * lwkt_switch[_return] is responsible for handling TDF_RUNNING. 784 */ 785 movq %rbx,%rax 786 popfq 787 popq %r15 788 popq %r14 789 popq %r13 790 popq %r12 791 popq %rbx 792 popq %rbp 793 ret 794