/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 2001 Benno Rice
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <net/netisr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/altivec.h>
#ifndef __powerpc64__
#include <machine/bat.h>
#endif
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/hid.h>
#include <machine/kdb.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mmuvar.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/spr.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/ofw_machdep.h>

#include <ddb/ddb.h>

#include <dev/ofw/openfirm.h>

#ifdef __powerpc64__
#include "mmu_oea64.h"
#endif

#ifndef __powerpc64__
struct bat	battable[16];
#endif
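
/*
 * The declarations below name entry and end labels in the assembly
 * trap/vector code (the trap_subr*.S snippets); only their addresses are
 * used, which is why they are declared as generic pointers.  Each handler
 * is bcopy()ed into the vector area using its paired *size or *end label
 * to compute the length.
 */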
#ifndef __powerpc64__
/* Bits for running on 64-bit systems in 32-bit mode. */
extern void	*testppc64, *testppc64size;
extern void	*restorebridge, *restorebridgesize;
extern void	*rfid_patch, *rfi_patch1, *rfi_patch2;
extern void	*trapcode64;

extern Elf_Addr	_GLOBAL_OFFSET_TABLE_[];
#endif

extern void	*rstcode, *rstcodeend;
extern void	*trapcode, *trapcodeend;
extern void	*generictrap, *generictrap64;
extern void	*alitrap, *aliend;
extern void	*dsitrap, *dsiend;
extern void	*decrint, *decrsize;
extern void	*extint, *extsize;
extern void	*dblow, *dbend;
extern void	*imisstrap, *imisssize;
extern void	*dlmisstrap, *dlmisssize;
extern void	*dsmisstrap, *dsmisssize;

extern void	*ap_pcpu;

void aim_cpu_init(vm_offset_t toc);

void
aim_cpu_init(vm_offset_t toc)
{
	size_t		trap_offset, trapsize;
	vm_offset_t	trap;
	register_t	msr, scratch;
	uint8_t		*cache_check;
	int		cacheline_warn;
#ifndef __powerpc64__
	int		ppc64;
#endif

	trap_offset = 0;
	cacheline_warn = 0;

	/* General setup for AIM CPUs */
	psl_kernset = PSL_EE | PSL_ME | PSL_IR | PSL_DR | PSL_RI;

#ifdef __powerpc64__
	psl_kernset |= PSL_SF;
	if (mfmsr() & PSL_HV)
		psl_kernset |= PSL_HV;
#endif
	psl_userset = psl_kernset | PSL_PR;
#ifdef __powerpc64__
	psl_userset32 = psl_userset & ~PSL_SF;
#endif

	/* Bits that users aren't allowed to change */
	psl_userstatic = ~(PSL_VEC | PSL_FP | PSL_FE0 | PSL_FE1);
	/*
	 * Mask bits from the SRR1 that aren't really the MSR:
	 * Bits 1-4, 10-15 (ppc32), 33-36, 42-47 (ppc64)
	 *
	 * In IBM (big-endian) bit numbering, bits 1-4 and 10-15 of a 32-bit
	 * word are 0x78000000 and 0x003f0000 respectively, hence the mask
	 * below; the ppc64 bits land on the same pattern in the low word.
	 */
	psl_userstatic &= ~0x783f0000UL;

	/* Various very early CPU fix ups */
	switch (mfpvr() >> 16) {
		/*
		 * PowerPC 970 CPUs have a misfeature requested by Apple that
		 * makes them pretend they have a 32-byte cacheline. Turn this
		 * off before we measure the cacheline size.
		 */
	case IBM970:
	case IBM970FX:
	case IBM970MP:
	case IBM970GX:
		scratch = mfspr(SPR_HID5);
		scratch &= ~HID5_970_DCBZ_SIZE_HI;
		mtspr(SPR_HID5, scratch);
		break;
#ifdef __powerpc64__
	case IBMPOWER7:
	case IBMPOWER7PLUS:
	case IBMPOWER8:
	case IBMPOWER8E:
		/* XXX: get from ibm,slb-size in device tree */
		n_slbs = 32;
		break;
#endif
	}

	/*
	 * Initialize the interrupt tables and figure out our cache line
	 * size and whether or not we need the 64-bit bridge code.
	 */

	/*
	 * Disable translation in case the vector area hasn't been
	 * mapped (G5). Note that no OFW calls can be made until
	 * translation is re-enabled.
	 */

	msr = mfmsr();
	mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);

	/*
	 * Measure the cacheline size using dcbz.
	 *
	 * Use EXC_PGM as a playground. We are about to overwrite it
	 * anyway, we know it exists, and we know it is cache-aligned.
	 */

	cache_check = (void *)EXC_PGM;

	for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
		cache_check[cacheline_size] = 0xff;

	__asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory");

	/* Find the first byte dcbz did not zero to get the cache line size */
	for (cacheline_size = 0; cacheline_size < 0x100 &&
	    cache_check[cacheline_size] == 0; cacheline_size++)
		;

	/* Work around psim bug */
	if (cacheline_size == 0) {
		cacheline_warn = 1;
		cacheline_size = 32;
	}
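
	/*
	 * Translation is now off, so the bcopy() calls below write the
	 * exception vectors directly at their physical addresses.
	 */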

#ifndef __powerpc64__
	/*
	 * Figure out whether we need to use the 64-bit PMAP. This works by
	 * executing an instruction that is only legal on 64-bit PPC
	 * (mtmsrd), and setting ppc64 = 0 if that causes a trap.
	 */

	ppc64 = 1;

	bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size);
	__syncicache((void *)EXC_PGM, (size_t)&testppc64size);

	/*
	 * SPRG2 is primed with 1; if mtmsrd traps, the handler just
	 * installed at EXC_PGM skips the instruction and returns with
	 * SPRG2 cleared, so ppc64 reads back as 0 on 32-bit CPUs.
	 */
	__asm __volatile("\
		mfmsr %0;	\
		mtsprg2 %1;	\
				\
		mtmsrd %0;	\
		mfsprg2 %1;"
	    : "=r"(scratch), "=r"(ppc64));

	if (ppc64)
		cpu_features |= PPC_FEATURE_64;

	/*
	 * Now copy restorebridge into all the handlers, if necessary,
	 * and set up the trap tables.
	 */

	if (cpu_features & PPC_FEATURE_64) {
		/* Patch the two instances of rfi -> rfid */
		bcopy(&rfid_patch, &rfi_patch1, 4);
#ifdef KDB
		/* rfi_patch2 is at the end of dbleave */
		bcopy(&rfid_patch, &rfi_patch2, 4);
#endif
	}
#else /* powerpc64 */
	cpu_features |= PPC_FEATURE_64;
#endif

	trapsize = (size_t)&trapcodeend - (size_t)&trapcode;

	/*
	 * Copy generic handler into every possible trap. Special cases will
	 * get different ones in a minute.
	 */
	for (trap = EXC_RST; trap < EXC_LAST; trap += 0x20)
		bcopy(&trapcode, (void *)trap, trapsize);

#ifndef __powerpc64__
	if (cpu_features & PPC_FEATURE_64) {
		/*
		 * Copy a code snippet to restore 32-bit bridge mode
		 * to the top of every non-generic trap handler
		 */

		trap_offset += (size_t)&restorebridgesize;
		bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
		bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
		bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
		bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
		bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);
	}
#endif

	bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstcodeend -
	    (size_t)&rstcode);

#ifdef KDB
	bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
#endif
	bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&aliend -
	    (size_t)&alitrap);
	bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsiend -
	    (size_t)&dsitrap);

#ifdef __powerpc64__
	/* Set TOC base so that the interrupt code can get at it */
	*((void **)TRAP_GENTRAP) = &generictrap;
	*((register_t *)TRAP_TOCBASE) = toc;
#else
	/* Set branch address for trap code */
	if (cpu_features & PPC_FEATURE_64)
		*((void **)TRAP_GENTRAP) = &generictrap64;
	else
		*((void **)TRAP_GENTRAP) = &generictrap;
	*((void **)TRAP_TOCBASE) = _GLOBAL_OFFSET_TABLE_;

	/* G2-specific TLB miss helper handlers */
	bcopy(&imisstrap, (void *)EXC_IMISS, (size_t)&imisssize);
	bcopy(&dlmisstrap, (void *)EXC_DLMISS, (size_t)&dlmisssize);
	bcopy(&dsmisstrap, (void *)EXC_DSMISS, (size_t)&dsmisssize);
#endif
	__syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);

	/*
	 * Restore MSR
	 */
	mtmsr(msr);

	/* Warn if cacheline size was not determined */
	if (cacheline_warn == 1) {
		printf("WARNING: cacheline size undetermined, setting to 32\n");
	}
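
	/*
	 * Note: pmap_mmu_install() keeps whichever registration carries the
	 * highest priority, so the BUS_PROBE_GENERIC entries below yield to
	 * any MMU a platform module registered with a stronger priority.
	 */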

	/*
	 * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
	 * in case the platform module had a better idea of what we
	 * should do.
	 */
	if (cpu_features & PPC_FEATURE_64)
		pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
	else
		pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
}

/*
 * Shut down the CPU as much as possible.
 */
void
cpu_halt(void)
{

	OF_exit();
}

int
ptrace_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 |= PSL_SE;

	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 &= ~PSL_SE;

	return (0);
}

void
kdb_cpu_clear_singlestep(void)
{

	kdb_frame->srr1 &= ~PSL_SE;
}

void
kdb_cpu_set_singlestep(void)
{

	kdb_frame->srr1 |= PSL_SE;
}

/*
 * Initialise a struct pcpu.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{
#ifdef __powerpc64__
	/* Copy the SLB contents from the current CPU */
	memcpy(pcpu->pc_slb, PCPU_GET(slb), sizeof(pcpu->pc_slb));
#endif
}

#ifndef __powerpc64__
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{

	return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
}
#endif

vm_offset_t
pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
{

	return (pa);
}
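
/*
 * Flush and disable all data caches (L1 through L3) so that no modified
 * cache lines are lost while the caches are powered down; called from
 * cpu_sleep() below before the core is put into sleep mode.
 */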
/* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */
void
flush_disable_caches(void)
{
	register_t msr;
	register_t msscr0;
	register_t cache_reg;
	volatile uint32_t *memp;
	uint32_t temp;
	int i;
	int x;

	msr = mfmsr();
	powerpc_sync();
	mtmsr(msr & ~(PSL_EE | PSL_DR));
	msscr0 = mfspr(SPR_MSSCR0);
	msscr0 &= ~MSSCR0_L2PFE;
	mtspr(SPR_MSSCR0, msscr0);
	powerpc_sync();
	isync();
	__asm__ __volatile__("dssall; sync");
	powerpc_sync();
	isync();
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));

	/* Lock the L1 Data cache. */
	mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF);
	powerpc_sync();
	isync();

	mtspr(SPR_LDSTCR, 0);

	/*
	 * Perform this in two stages: Flush the cache starting in RAM,
	 * then do it from ROM.
	 */
	memp = (volatile uint32_t *)0x00000000;
	for (i = 0; i < 128 * 1024; i++) {
		temp = *memp;
		__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
		memp += 32/sizeof(*memp);
	}

	memp = (volatile uint32_t *)0xfff00000;
	x = 0xfe;

	for (; x != 0xff;) {
		mtspr(SPR_LDSTCR, x);
		for (i = 0; i < 128; i++) {
			temp = *memp;
			__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
			memp += 32/sizeof(*memp);
		}
		x = ((x << 1) | 1) & 0xff;
	}
	mtspr(SPR_LDSTCR, 0);

	cache_reg = mfspr(SPR_L2CR);
	if (cache_reg & L2CR_L2E) {
		cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450);
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF);
		while (mfspr(SPR_L2CR) & L2CR_L2HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L2CR_L2E;
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2I);
		powerpc_sync();
		while (mfspr(SPR_L2CR) & L2CR_L2I)
			; /* Busy wait for L2 cache invalidate */
		powerpc_sync();
	}

	cache_reg = mfspr(SPR_L3CR);
	if (cache_reg & L3CR_L3E) {
		cache_reg &= ~(L3CR_L3IO | L3CR_L3DO);
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF);
		while (mfspr(SPR_L3CR) & L3CR_L3HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L3CR_L3E;
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3I);
		powerpc_sync();
		while (mfspr(SPR_L3CR) & L3CR_L3I)
			; /* Busy wait for L3 cache invalidate */
		powerpc_sync();
	}

	mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE);
	powerpc_sync();
	isync();

	mtmsr(msr);
}

void
cpu_sleep(void)
{
	static u_quad_t timebase = 0;
	static register_t sprgs[4];
	static register_t srrs[2];

	jmp_buf resetjb;
	struct thread *fputd;
	struct thread *vectd;
	register_t hid0;
	register_t msr;
	register_t saved_msr;

	ap_pcpu = pcpup;

	PCPU_SET(restore, &resetjb);

	saved_msr = mfmsr();
	fputd = PCPU_GET(fputhread);
	vectd = PCPU_GET(vecthread);
	if (fputd != NULL)
		save_fpu(fputd);
	if (vectd != NULL)
		save_vec(vectd);
	if (setjmp(resetjb) == 0) {
		sprgs[0] = mfspr(SPR_SPRG0);
		sprgs[1] = mfspr(SPR_SPRG1);
		sprgs[2] = mfspr(SPR_SPRG2);
		sprgs[3] = mfspr(SPR_SPRG3);
		srrs[0] = mfspr(SPR_SRR0);
		srrs[1] = mfspr(SPR_SRR1);
		timebase = mftb();
		powerpc_sync();
		flush_disable_caches();
		hid0 = mfspr(SPR_HID0);
		hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP;
		powerpc_sync();
		isync();
		msr = mfmsr() | PSL_POW;
		mtspr(SPR_HID0, hid0);
		powerpc_sync();

		/*
		 * Setting PSL_POW with HID0_SLEEP selected stops the core;
		 * on wakeup the reset code longjmp()s back through resetjb
		 * (saved via PCPU above), so this loop never exits normally.
		 */
		while (1)
			mtmsr(msr);
	}
	/* Woken up: resynchronize the timebase and rebuild CPU state. */
	platform_smp_timebase_sync(timebase, 0);
	PCPU_SET(curthread, curthread);
	PCPU_SET(curpcb, curthread->td_pcb);
	pmap_activate(curthread);
	powerpc_sync();
	mtspr(SPR_SPRG0, sprgs[0]);
	mtspr(SPR_SPRG1, sprgs[1]);
	mtspr(SPR_SPRG2, sprgs[2]);
	mtspr(SPR_SPRG3, sprgs[3]);
	mtspr(SPR_SRR0, srrs[0]);
	mtspr(SPR_SRR1, srrs[1]);
	mtmsr(saved_msr);
	if (fputd == curthread)
		enable_fpu(curthread);
	if (vectd == curthread)
		enable_vec(curthread);
	powerpc_sync();
}