/*
 * Copyright (c) 1992 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Digital Equipment Corporation and Ralph Campbell.
 *
 * %sccs.include.redist.c%
 *
 * Copyright (C) 1989 Digital Equipment Corporation.
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies.
 * Digital Equipment Corporation makes no representations about the
 * suitability of this software for any purpose.  It is provided "as is"
 * without express or implied warranty.
 *
 * from: $Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
 *	v 1.1 89/07/11 17:55:04 nelson Exp $ SPRITE (DECWRL)
 * from: $Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
 *	v 9.2 90/01/29 18:00:39 shirriff Exp $ SPRITE (DECWRL)
 * from: $Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
 *	v 1.1 89/07/10 14:27:41 nelson Exp $ SPRITE (DECWRL)
 *
 *	@(#)locore.s	7.14 (Berkeley) 04/05/93
 */

/*
 * Contains code that is the first executed at boot time plus
 * assembly language support routines.
 */

#include <sys/errno.h>
#include <sys/syscall.h>

#include <machine/param.h>
#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/machAsmDefs.h>
#include <machine/pte.h>

#include "assym.h"

	/*
	 * NOTE: the whole file is assembled with reordering disabled;
	 * the instruction after every branch/jump below is its delay
	 * slot and is filled by hand.  Do not reorder instructions.
	 */
	.set	noreorder

/*
 * Amount to take off of the stack for the benefit of the debugger.
 */
#define START_FRAME	((4 * 4) + 4 + 4)

/*
 * start -- kernel entry point at boot time.
 * Disables interrupts, invalidates the entire TLB, calls mach_init()
 * on a temporary stack below 'start', then main() on the standard
 * kernel stack.  When main() returns, proc[1] (init) is running here
 * and we drop into user mode at VM_MIN_ADDRESS to execute 'icode'.
 */
	.globl	start
start:
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts
	li	t1, MACH_RESERVED_ADDR		# invalid address
	mtc0	t1, MACH_COP_0_TLB_HI	# Mark entry high as invalid
	mtc0	zero, MACH_COP_0_TLB_LOW	# Zero out low entry.
/*
 * Clear the TLB (just to be safe).
 * Align the starting value (t1), the increment (t2) and the upper bound (t3).
 */
	move	t1, zero
	li	t2, 1 << VMMACH_TLB_INDEX_SHIFT
	li	t3, VMMACH_NUM_TLB_ENTRIES << VMMACH_TLB_INDEX_SHIFT
1:
	mtc0	t1, MACH_COP_0_TLB_INDEX	# Set the index register.
	addu	t1, t1, t2			# Increment index.
	bne	t1, t3, 1b			# NB: always executes next
	tlbwi					# Write the TLB entry.

	la	sp, start - START_FRAME		# temporary stack below 'start'
#	la	gp, _gp
	sw	zero, START_FRAME - 4(sp)	# Zero out old ra for debugger
	jal	mach_init			# mach_init(argc, argv, envp)
	sw	zero, START_FRAME - 8(sp)	# Zero out old fp for debugger

	li	t0, MACH_SR_COP_1_BIT		# Disable interrupts and
	mtc0	t0, MACH_COP_0_STATUS_REG	# enable the coprocessor
	li	sp, KERNELSTACK - START_FRAME	# switch to standard stack
	mfc0	t0, MACH_COP_0_PRID		# read processor ID register
	cfc1	t1, MACH_FPC_ID			# read FPU ID register
	sw	t0, cpu				# save PRID register
	sw	t1, fpu				# save FPU ID register
	jal	main				# main()
	nop

/* proc[1] == /etc/init now running here; run icode */
	li	v0, PSL_USERSET
	mtc0	v0, MACH_COP_0_STATUS_REG	# switch to user mode
	li	v0, VM_MIN_ADDRESS
	j	v0				# jump to icode
	rfe					# restore from exception in delay slot

/*
 * GCC2 seems to want to call __main in main() for some reason.
 * Empty stub so the call resolves.
 */
LEAF(__main)
	j	ra
	nop
END(__main)

/*
 * This code is copied to user data space as the first program to run.
 * Basically, it just calls execve();
 */
	.globl	icode
icode:
	# execve(2) args: a0 = path, a1 = argv, a2 = envp.  The constants
	# below are offsets from VM_MIN_ADDRESS where this code is copied.
	li	a1, VM_MIN_ADDRESS + (9 * 4)	# address of 'icode_argv'
	addu	a0, a1, (3 * 4)		# address of 'icode_fname'
	move	a2, zero		# no environment
	li	v0, 59			# code for execve system call
	syscall
	li	v0, 1			# code for exit system call
	syscall				# execve failed: call exit()
1:	b	1b			# loop if exit returns
	nop
icode_argv:
	.word	VM_MIN_ADDRESS + (12 * 4)	# address of 'icode_fname'
	.word	VM_MIN_ADDRESS + (15 * 4)	# address of 'icodeEnd'
	.word	0
icode_fname:
	.asciiz	"/sbin/init"		# occupies 3 words
	.align	2
	.globl	icodeEnd
icodeEnd:

	.data
	.align	2
	.globl	szicode
szicode:
	.word	(9 + 3 + 3) * 4		# compute icodeEnd - icode
	.text

/*
 * This code is copied the user's stack for returning from signal handlers
 * (see sendsig() and sigreturn()). We have to compute the address
 * of the sigcontext struct for the sigreturn call.
 */
	.globl	sigcode
sigcode:
	addu	a0, sp, 16		# address of sigcontext
	li	v0, SYS_sigreturn	# sigreturn(scp)
	syscall
	break	0			# just in case sigreturn fails
	.globl	esigcode
esigcode:

/*
 * Primitives
 */

/*
 * This table is indexed by u.u_pcb.pcb_onfault in trap().
 * The reason for using this table rather than storing an address in
 * u.u_pcb.pcb_onfault is simply to make the code faster.
 * The BADERR/COPYERR/... indices below are stored into pcb_onfault
 * by the access primitives later in this file.
 */
	.globl	onfault_table
	.data
	.align	2
onfault_table:
	.word	0			# invalid index number
#define BADERR		1
	.word	baderr
#define COPYERR		2
	.word	copyerr
#define FSWBERR		3
	.word	fswberr
#define FSWINTRBERR	4
	.word	fswintrberr
#ifdef KADB
#define KADBERR		5
	.word	kadberr
#endif
	.text

/*
 * See if access to addr with a len type instruction causes a machine check.
 * len is length of access (1=byte, 2=short, 4=long)
 *
 * badaddr(addr, len)
 *	char *addr;
 *	int len;
 *
 * Returns 0 if the probe succeeded, 1 if it faulted (trap() sends us
 * to 'baderr' via onfault_table[BADERR]).
 */
LEAF(badaddr)
	li	v0, BADERR
	bne	a1, 1, 2f		# not a byte probe?
	sw	v0, UADDR+U_PCB_ONFAULT	# arm fault handler (delay slot)
	b	5f
	lbu	v0, (a0)		# probe byte (delay slot)
2:
	bne	a1, 2, 4f		# not a short probe?
	nop
	b	5f
	lhu	v0, (a0)		# probe short (delay slot)
4:
	lw	v0, (a0)		# probe word
5:
	sw	zero, UADDR+U_PCB_ONFAULT	# disarm fault handler
	j	ra
	move	v0, zero		# made it w/o errors
baderr:
	j	ra
	li	v0, 1			# trap sends us here
END(badaddr)

/*
 * netorder = htonl(hostorder)
 * hostorder = ntohl(netorder)
 */
LEAF(htonl)				# a0 = 0x11223344, return 0x44332211
ALEAF(ntohl)
	srl	v1, a0, 24		# v1 = 0x00000011
	sll	v0, a0, 24		# v0 = 0x44000000
	or	v0, v0, v1
	and	v1, a0, 0xff00
	sll	v1, v1, 8		# v1 = 0x00330000
	or	v0, v0, v1
	srl	v1, a0, 8
	and	v1, v1, 0xff00		# v1 = 0x00002200
	j	ra
	or	v0, v0, v1
END(htonl)

/*
 * netorder = htons(hostorder)
 * hostorder = ntohs(netorder)
 */
LEAF(htons)
ALEAF(ntohs)
	srl	v0, a0, 8
	and	v0, v0, 0xff
	sll	v1, a0, 8
	and	v1, v1, 0xff00
	j	ra
	or	v0, v0, v1
END(htons)

/*
 * bit = ffs(value)
 * Returns the 1-based index of the lowest set bit, 0 if value == 0.
 */
LEAF(ffs)
	beq	a0, zero, 2f
	move	v0, zero
1:
	and	v1, a0, 1		# bit set?
	addu	v0, v0, 1
	beq	v1, zero, 1b		# no, continue
	srl	a0, a0, 1
2:
	j	ra
	nop
END(ffs)

/*
 * strlen(str)
 */
LEAF(strlen)
	addu	v1, a0, 1		# v1 = str + 1 so final subtract
					#  cancels the '\0' we walked past
1:
	lb	v0, 0(a0)		# get byte from string
	addu	a0, a0, 1		# increment pointer
	bne	v0, zero, 1b		# continue if not end
	nop
	j	ra
	subu	v0, a0, v1		# compute length - 1 for '\0' char
END(strlen)

/*
 * NOTE: this version assumes unsigned chars in order to be "8 bit clean".
 */
LEAF(strcmp)
1:
	lbu	t0, 0(a0)		# get two bytes and compare them
	lbu	t1, 0(a1)
	beq	t0, zero, LessOrEq	# end of first string?
	nop
	bne	t0, t1, NotEq
	nop
	lbu	t0, 1(a0)		# unroll loop
	lbu	t1, 1(a1)
	beq	t0, zero, LessOrEq	# end of first string?
	addu	a0, a0, 2
	beq	t0, t1, 1b
	addu	a1, a1, 2
NotEq:
	j	ra
	subu	v0, t0, t1
LessOrEq:
	j	ra
	subu	v0, zero, t1
END(strcmp)

/*
 * bzero(s1, n)
 */
LEAF(bzero)
ALEAF(blkclr)
	blt	a1, 12, smallclr	# small amount to clear?
	subu	a3, zero, a0		# compute # bytes to word align address
	and	a3, a3, 3
	beq	a3, zero, 1f		# skip if word aligned
	subu	a1, a1, a3		# subtract from remaining count
	swr	zero, 0(a0)		# clear 1, 2, or 3 bytes to align
	addu	a0, a0, a3
1:
	and	v0, a1, 3		# compute number of words left
	subu	a3, a1, v0
	move	a1, v0
	addu	a3, a3, a0		# compute ending address
2:
	addu	a0, a0, 4		# clear words
	bne	a0, a3, 2b		#  unrolling loop does not help
	sw	zero, -4(a0)		#  since we are limited by memory speed
smallclr:
	ble	a1, zero, 2f
	addu	a3, a1, a0		# compute ending address
1:
	addu	a0, a0, 1		# clear bytes
	bne	a0, a3, 1b
	sb	zero, -1(a0)
2:
	j	ra
	nop
END(bzero)

/*
 * bcmp(s1, s2, n)
 * Returns 0 if the regions match, 1 if they differ.
 */
LEAF(bcmp)
	blt	a2, 16, smallcmp	# is it worth any trouble?
	xor	v0, a0, a1		# compare low two bits of addresses
	and	v0, v0, 3
	subu	a3, zero, a1		# compute # bytes to word align address
	bne	v0, zero, unalignedcmp	# not possible to align addresses
	and	a3, a3, 3

	beq	a3, zero, 1f
	subu	a2, a2, a3		# subtract from remaining count
	move	v0, v1			# init v0,v1 so unmodified bytes match
	lwr	v0, 0(a0)		# read 1, 2, or 3 bytes
	lwr	v1, 0(a1)
	addu	a1, a1, a3
	bne	v0, v1, nomatch
	addu	a0, a0, a3
1:
	and	a3, a2, ~3		# compute number of whole words left
	subu	a2, a2, a3		#  which has to be >= (16-3) & ~3
	addu	a3, a3, a0		# compute ending address
2:
	lw	v0, 0(a0)		# compare words
	lw	v1, 0(a1)
	addu	a0, a0, 4
	bne	v0, v1, nomatch
	addu	a1, a1, 4
	bne	a0, a3, 2b
	nop
	b	smallcmp		# finish remainder
	nop
unalignedcmp:
	beq	a3, zero, 2f
	subu	a2, a2, a3		# subtract from remaining count
	addu	a3, a3, a0		# compute ending address
1:
	lbu	v0, 0(a0)		# compare bytes until a1 word aligned
	lbu	v1, 0(a1)
	addu	a0, a0, 1
	bne	v0, v1, nomatch
	addu	a1, a1, 1
	bne	a0, a3, 1b
	nop
2:
	and	a3, a2, ~3		# compute number of whole words left
	subu	a2, a2, a3		#  which has to be >= (16-3) & ~3
	addu	a3, a3, a0		# compute ending address
3:
	lwr	v0, 0(a0)		# compare words a0 unaligned, a1 aligned
	lwl	v0, 3(a0)
	lw	v1, 0(a1)
	addu	a0, a0, 4
	bne	v0, v1, nomatch
	addu	a1, a1, 4
	bne	a0, a3, 3b
	nop
smallcmp:
	ble	a2, zero, match
	addu	a3, a2, a0		# compute ending address
1:
	lbu	v0, 0(a0)
	lbu	v1, 0(a1)
	addu	a0, a0, 1
	bne	v0, v1, nomatch
	addu	a1, a1, 1
	bne	a0, a3, 1b
	nop
match:
	j	ra
	move	v0, zero
nomatch:
	j	ra
	li	v0, 1
END(bcmp)

/*
 * {ov}bcopy(from, to, len)
 * Handles overlapping regions by copying backwards when
 * from < to < from+len; otherwise copies forward with word
 * alignment optimizations.
 */
LEAF(bcopy)
ALEAF(ovbcopy)
	addu	t0, a0, a2		# t0 = end of s1 region
	sltu	t1, a1, t0
	sltu	t2, a0, a1
	and	t1, t1, t2		# t1 = true if from < to < (from+len)
	beq	t1, zero, forward	# non overlapping, do forward copy
	slt	t2, a2, 12		# check for small copy

	ble	a2, zero, 2f
	addu	t1, a1, a2		# t1 = end of to region
1:
	lb	v0, -1(t0)		# copy bytes backwards,
	subu	t0, t0, 1		#  doesnt happen often so do slow way
	subu	t1, t1, 1
	bne	t0, a0, 1b
	sb	v0, 0(t1)
2:
	j	ra
	nop
forward:
	bne	t2, zero, smallcpy	# do a small bcopy
	xor	v0, a0, a1		# compare low two bits of addresses
	and	v0, v0, 3
	subu	a3, zero, a1		# compute # bytes to word align address
	beq	v0, zero, aligned	# addresses can be word aligned
	and	a3, a3, 3

	beq	a3, zero, 1f
	subu	a2, a2, a3		# subtract from remaining count
	lwr	v0, 0(a0)		# get next 4 bytes (unaligned)
	lwl	v0, 3(a0)
	addu	a0, a0, a3
	swr	v0, 0(a1)		# store 1, 2, or 3 bytes to align a1
	addu	a1, a1, a3
1:
	and	v0, a2, 3		# compute number of words left
	subu	a3, a2, v0
	move	a2, v0
	addu	a3, a3, a0		# compute ending address
2:
	lwr	v0, 0(a0)		# copy words a0 unaligned, a1 aligned
	lwl	v0, 3(a0)
	addu	a0, a0, 4
	addu	a1, a1, 4
	bne	a0, a3, 2b
	sw	v0, -4(a1)
	b	smallcpy
	nop
aligned:
	beq	a3, zero, 1f
	subu	a2, a2, a3		# subtract from remaining count
	lwr	v0, 0(a0)		# copy 1, 2, or 3 bytes to align
	addu	a0, a0, a3
	swr	v0, 0(a1)
	addu	a1, a1, a3
1:
	and	v0, a2, 3		# compute number of whole words left
	subu	a3, a2, v0
	move	a2, v0
	addu	a3, a3, a0		# compute ending address
2:
	lw	v0, 0(a0)		# copy words
	addu	a0, a0, 4
	addu	a1, a1, 4
	bne	a0, a3, 2b
	sw	v0, -4(a1)
smallcpy:
	ble	a2, zero, 2f
	addu	a3, a2, a0		# compute ending address
1:
	lbu	v0, 0(a0)		# copy bytes
	addu	a0, a0, 1
	addu	a1, a1, 1
	bne	a0, a3, 1b
	sb	v0, -1(a1)
2:
	j	ra
	move	v0, zero
END(bcopy)

/*
 * Copy a null terminated string within the kernel address space.
 * Maxlength may be null if count not wanted.
 * copystr(fromaddr, toaddr, maxlength, &lencopied)
 *	caddr_t fromaddr;
 *	caddr_t toaddr;
 *	u_int maxlength;
 *	u_int *lencopied;
 */
LEAF(copystr)
	move	t2, a2			# Save the number of bytes
1:
	lbu	t0, 0(a0)
	subu	a2, a2, 1
	beq	t0, zero, 2f		# hit the terminating '\0'?
	sb	t0, 0(a1)		# store byte (delay slot)
	addu	a0, a0, 1
	bne	a2, zero, 1b		# more room left?
	addu	a1, a1, 1
2:
	beq	a3, zero, 3f		# lencopied wanted?
	subu	a2, t2, a2		# compute length copied
	sw	a2, 0(a3)
3:
	j	ra
	move	v0, zero
END(copystr)

/*
 * Copy a null terminated string from the user address space into
 * the kernel address space.
 *
 * copyinstr(fromaddr, toaddr, maxlength, &lencopied)
 *	caddr_t fromaddr;
 *	caddr_t toaddr;
 *	u_int maxlength;
 *	u_int *lencopied;
 *
 * Faults while copying are caught via onfault_table[COPYERR] and
 * return EFAULT from 'copyerr'.
 */
NON_LEAF(copyinstr, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	sw	ra, STAND_RA_OFFSET(sp)
	blt	a0, zero, copyerr	# make sure address is in user space
	li	v0, COPYERR
	jal	copystr
	sw	v0, UADDR+U_PCB_ONFAULT	# arm fault handler (delay slot)
	lw	ra, STAND_RA_OFFSET(sp)
	sw	zero, UADDR+U_PCB_ONFAULT
	addu	sp, sp, STAND_FRAME_SIZE
	j	ra
	move	v0, zero
END(copyinstr)

/*
 * Copy a null terminated string from the kernel address space into
 * the user address space.
 *
 * copyoutstr(fromaddr, toaddr, maxlength, &lencopied)
 *	caddr_t fromaddr;
 *	caddr_t toaddr;
 *	u_int maxlength;
 *	u_int *lencopied;
 */
NON_LEAF(copyoutstr, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	sw	ra, STAND_RA_OFFSET(sp)
	blt	a1, zero, copyerr	# make sure address is in user space
	li	v0, COPYERR
	jal	copystr
	sw	v0, UADDR+U_PCB_ONFAULT	# arm fault handler (delay slot)
	lw	ra, STAND_RA_OFFSET(sp)
	sw	zero, UADDR+U_PCB_ONFAULT
	addu	sp, sp, STAND_FRAME_SIZE
	j	ra
	move	v0, zero
END(copyoutstr)

/*
 * Copy specified amount of data from user space into the kernel
 *	copyin(from, to, len)
 *		caddr_t *from;	(user source address)
 *		caddr_t *to;	(kernel destination address)
 *		unsigned len;
 */
NON_LEAF(copyin, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	sw	ra, STAND_RA_OFFSET(sp)
	blt	a0, zero, copyerr	# make sure address is in user space
	li	v0, COPYERR
	jal	bcopy
	sw	v0, UADDR+U_PCB_ONFAULT	# arm fault handler (delay slot)
	lw	ra, STAND_RA_OFFSET(sp)
	sw	zero, UADDR+U_PCB_ONFAULT
	addu	sp, sp, STAND_FRAME_SIZE
	j	ra
	move	v0, zero
END(copyin)

/*
 * Copy specified amount of data from kernel to the user space
 *	copyout(from, to, len)
 *		caddr_t *from;	(kernel source address)
 *		caddr_t *to;	(user destination address)
 *		unsigned len;
 */
NON_LEAF(copyout, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	sw	ra, STAND_RA_OFFSET(sp)
	blt	a1, zero, copyerr	# make sure address is in user space
	li	v0, COPYERR
	jal	bcopy
	sw	v0, UADDR+U_PCB_ONFAULT	# arm fault handler (delay slot)
	lw	ra, STAND_RA_OFFSET(sp)
	sw	zero, UADDR+U_PCB_ONFAULT
	addu	sp, sp, STAND_FRAME_SIZE
	j	ra
	move	v0, zero
END(copyout)

/*
 * Common fault target for the copy routines above: trap() dispatches
 * here via onfault_table[COPYERR].  Unwinds the caller's frame
 * (all four routines use STAND_FRAME_SIZE) and returns EFAULT.
 */
LEAF(copyerr)
	lw	ra, STAND_RA_OFFSET(sp)
	sw	zero, UADDR+U_PCB_ONFAULT
	addu	sp, sp, STAND_FRAME_SIZE
	j	ra
	li	v0, EFAULT		# return error
END(copyerr)

/*
 * Copy data to the DMA buffer.
 * The DMA bufffer can only be written one short at a time
 * (and takes ~14 cycles).
 *
 *	CopyToBuffer(src, dst, length)
 *		u_short *src;	NOTE: must be short aligned
 *		u_short *dst;
 *		int length;
 */
LEAF(CopyToBuffer)
	blez	a2, 2f
	nop
1:
	lhu	t0, 0(a0)		# read 2 bytes of data
	subu	a2, a2, 2
	addu	a0, a0, 2
	addu	a1, a1, 4		# dst advances 4: buffer shorts are
	bgtz	a2, 1b			#  word spaced
	sh	t0, -4(a1)		# write 2 bytes of data to buffer
2:
	j	ra
	nop
END(CopyToBuffer)

/*
 * Copy data from the DMA buffer.
 * The DMA bufffer can only be read one short at a time
 * (and takes ~12 cycles).
 *
 *	CopyFromBuffer(src, dst, length)
 *		u_short *src;
 *		char *dst;
 *		int length;
 */
LEAF(CopyFromBuffer)
	and	t0, a1, 1		# test for aligned dst
	beq	t0, zero, 3f
	nop
	blt	a2, 2, 7f		# at least 2 bytes to copy?
	nop
1:
	lhu	t0, 0(a0)		# read 2 bytes of data from buffer
	addu	a0, a0, 4		# keep buffer pointer word aligned
	addu	a1, a1, 2
	subu	a2, a2, 2
	sb	t0, -2(a1)		# store low then high byte since
	srl	t0, t0, 8		#  dst is odd aligned
	bge	a2, 2, 1b
	sb	t0, -1(a1)
3:
	blt	a2, 2, 7f		# at least 2 bytes to copy?
	nop
6:
	lhu	t0, 0(a0)		# read 2 bytes of data from buffer
	addu	a0, a0, 4		# keep buffer pointer word aligned
	addu	a1, a1, 2
	subu	a2, a2, 2
	bge	a2, 2, 6b
	sh	t0, -2(a1)
7:
	ble	a2, zero, 9f		# done?
	nop
	lhu	t0, 0(a0)		# copy one more byte
	nop
	sb	t0, 0(a1)
9:
	j	ra
	nop
END(CopyFromBuffer)

/*
 * Copy the kernel stack to the new process and save the current context so
 * the new process will return nonzero when it is resumed by cpu_swtch().
 *
 * copykstack(up)
 *	struct user *up;
 */
LEAF(copykstack)
	subu	v0, sp, UADDR		# compute offset into stack
	addu	v0, v0, a0		# v0 = new stack address
	move	v1, sp			# v1 = old stack address
	li	t1, KERNELSTACK
1:
	lw	t0, 0(v1)		# copy stack data
	addu	v1, v1, 4
	sw	t0, 0(v0)
	bne	v1, t1, 1b
	addu	v0, v0, 4
	/* FALLTHROUGH */
/*
 * Save registers and state so we can do a longjmp later.
 * Note: this only works if p != curproc since
 * cpu_swtch() will copy over pcb_context.
 *
 *	savectx(up)
 *		struct user *up;
 */
ALEAF(savectx)
	sw	s0, U_PCB_CONTEXT+0(a0)
	sw	s1, U_PCB_CONTEXT+4(a0)
	sw	s2, U_PCB_CONTEXT+8(a0)
	sw	s3, U_PCB_CONTEXT+12(a0)
	mfc0	v0, MACH_COP_0_STATUS_REG
	sw	s4, U_PCB_CONTEXT+16(a0)
	sw	s5, U_PCB_CONTEXT+20(a0)
	sw	s6, U_PCB_CONTEXT+24(a0)
	sw	s7, U_PCB_CONTEXT+28(a0)
	sw	sp, U_PCB_CONTEXT+32(a0)
	sw	s8, U_PCB_CONTEXT+36(a0)
	sw	ra, U_PCB_CONTEXT+40(a0)
	sw	v0, U_PCB_CONTEXT+44(a0)
	j	ra
	move	v0, zero		# caller sees 0; resumed context sees 1
END(copykstack)

/*
 * _whichqs tells which of the 32 queues _qs
 * have processes in them.  Setrq puts processes into queues, Remrq
 * removes them from queues.  The running process is on no queue,
 * other processes are on a queue related to p->p_pri, divided by 4
 * actually to shrink the 0-127 range of priorities into the 32 available
 * queues.
 */

/*
 * setrq(p)
 *	proc *p;
 *
 * Call should be made at splclock(), and p->p_stat should be SRUN.
 */
NON_LEAF(setrq, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	lw	t0, P_RLINK(a0)		## firewall: p->p_rlink must be 0
	sw	ra, STAND_RA_OFFSET(sp)	##
	beq	t0, zero, 1f		##
	lbu	t0, P_PRI(a0)		# put on queue which is p->p_pri / 4
	PANIC("setrq")			##
1:
	li	t1, 1			# compute corresponding bit
	srl	t0, t0, 2		# compute index into 'whichqs'
	sll	t1, t1, t0
	lw	t2, whichqs		# set corresponding bit
	nop
	or	t2, t2, t1
	sw	t2, whichqs
	sll	t0, t0, 3		# compute index into 'qs'
	la	t1, qs
	addu	t0, t0, t1		# t0 = qp = &qs[pri >> 2]
	lw	t1, P_RLINK(t0)		# t1 = qp->ph_rlink
	sw	t0, P_LINK(a0)		# p->p_link = qp
	sw	t1, P_RLINK(a0)		# p->p_rlink = qp->ph_rlink
	sw	a0, P_LINK(t1)		# p->p_rlink->p_link = p;
	sw	a0, P_RLINK(t0)		# qp->ph_rlink = p
	j	ra
	addu	sp, sp, STAND_FRAME_SIZE
END(setrq)

/*
 * Remrq(p)
 *
 * Call should be made at splclock().
 */
NON_LEAF(remrq, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	lbu	t0, P_PRI(a0)		# get from queue which is p->p_pri / 4
	li	t1, 1			# compute corresponding bit
	srl	t0, t0, 2		# compute index into 'whichqs'
	lw	t2, whichqs		# check corresponding bit
	sll	t1, t1, t0
	and	v0, t2, t1
	sw	ra, STAND_RA_OFFSET(sp)	##
	bne	v0, zero, 1f		##
	lw	v0, P_RLINK(a0)		# v0 = p->p_rlink
	PANIC("remrq")			## it wasnt recorded to be on its q
1:
	lw	v1, P_LINK(a0)		# v1 = p->p_link
	nop
	sw	v1, P_LINK(v0)		# p->p_rlink->p_link = p->p_link;
	sw	v0, P_RLINK(v1)		# p->p_link->p_rlink = p->r_rlink
	sll	t0, t0, 3		# compute index into 'qs'
	la	v0, qs
	addu	t0, t0, v0		# t0 = qp = &qs[pri >> 2]
	lw	v0, P_LINK(t0)		# check if queue empty
	nop
	bne	v0, t0, 2f		# No. qp->ph_link != qp
	nop
	xor	t2, t2, t1		# clear corresponding bit in 'whichqs'
	sw	t2, whichqs
2:
	sw	zero, P_RLINK(a0)	## for firewall checking
	j	ra
	addu	sp, sp, STAND_FRAME_SIZE
END(remrq)

/*
 * swtch_exit()
 *
 * At exit of a process, do a cpu_swtch for the last time.
 * The mapping of the pcb at p->p_addr has already been deleted,
 * and the memory for the pcb+stack has been freed.
 * All interrupts should be blocked at this point.
 */
LEAF(swtch_exit)
	la	v0, nullproc		# save state into garbage proc
	lw	t0, P_UPTE+0(v0)	# t0 = first u. pte
	lw	t1, P_UPTE+4(v0)	# t1 = 2nd u. pte
	li	v0, UADDR		# v0 = first HI entry
	mtc0	zero, MACH_COP_0_TLB_INDEX	# set the index register
	mtc0	v0, MACH_COP_0_TLB_HI	# init high entry
	mtc0	t0, MACH_COP_0_TLB_LOW	# init low entry
	li	t0, 1 << VMMACH_TLB_INDEX_SHIFT
	tlbwi				# Write the TLB entry.
	addu	v0, v0, NBPG		# 2nd HI entry
	mtc0	t0, MACH_COP_0_TLB_INDEX	# set the index register
	mtc0	v0, MACH_COP_0_TLB_HI	# init high entry
	mtc0	t1, MACH_COP_0_TLB_LOW	# init low entry
	nop
	tlbwi				# Write the TLB entry.
	li	sp, KERNELSTACK - START_FRAME	# switch to standard stack
	b	cpu_swtch
	nop
END(swtch_exit)

/*
 * When no processes are on the runq, cpu_swtch branches to idle
 * to wait for something to come ready.
 * Note: this is really a part of cpu_swtch() but defined here for kernel
 * profiling.
 */
LEAF(idle)
	li	t0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR)
	mtc0	t0, MACH_COP_0_STATUS_REG	# enable all interrupts
	sw	zero, curproc		# set curproc NULL for stats
1:
	lw	t0, whichqs		# look for non-empty queue
	nop
	beq	t0, zero, 1b
	nop
	b	sw1
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable all interrupts
END(idle)

/*
 * cpu_swtch()
 * Find the highest priority process and resume it.
 */
NON_LEAF(cpu_swtch, STAND_FRAME_SIZE, ra)
	sw	sp, UADDR+U_PCB_CONTEXT+32	# save old sp
	subu	sp, sp, STAND_FRAME_SIZE
	sw	ra, STAND_RA_OFFSET(sp)
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	lw	t2, cnt+V_SWTCH			# for statistics
	lw	t1, whichqs			# look for non-empty queue
	mfc0	t0, MACH_COP_0_STATUS_REG	# t0 = saved status register
	sw	ra, UADDR+U_PCB_CONTEXT+40	# save return address
	sw	t0, UADDR+U_PCB_CONTEXT+44	# save status register
	addu	t2, t2, 1
	beq	t1, zero, idle			# if none, idle
	sw	t2, cnt+V_SWTCH
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable all interrupts
sw1:
	nop					# wait for intrs disabled
	nop
	lw	t0, whichqs			# look for non-empty queue
	li	t2, -1				# t2 = lowest bit set
	beq	t0, zero, idle			# if none, idle
	move	t3, t0				# t3 = saved whichqs
1:
	addu	t2, t2, 1
	and	t1, t0, 1			# bit set?
	beq	t1, zero, 1b
	srl	t0, t0, 1			# try next bit
/*
 * Remove process from queue.
 */
	sll	t0, t2, 3
	la	t1, qs
	addu	t0, t0, t1			# t0 = qp = &qs[highbit]
	lw	a0, P_LINK(t0)			# a0 = p = highest pri process
	nop
	lw	v0, P_LINK(a0)			# v0 = p->p_link
	bne	t0, a0, 2f			# make sure something in queue
	sw	v0, P_LINK(t0)			# qp->ph_link = p->p_link;
	PANIC("cpu_swtch")			# nothing in queue
2:
	sw	t0, P_RLINK(v0)			# p->p_link->p_rlink = qp
	bne	v0, t0, 3f			# queue still not empty
	sw	zero, P_RLINK(a0)		## for firewall checking
	li	v1, 1				# compute bit in 'whichqs'
	sll	v1, v1, t2
	xor	t3, t3, v1			# clear bit in 'whichqs'
	sw	t3, whichqs
3:
/*
 * Save old context and switch to new one.
 */
	sw	a0, curproc			# set curproc
	sw	zero, want_resched
	jal	pmap_alloc_tlbpid		# v0 = TLB PID
	sw	a0, STAND_FRAME_SIZE(sp)	# save p
	lw	a0, STAND_FRAME_SIZE(sp)	# restore p
	sll	v0, v0, VMMACH_TLB_PID_SHIFT	# v0 = aligned PID
	lw	t0, P_UPTE+0(a0)		# t0 = first u. pte
	lw	t1, P_UPTE+4(a0)		# t1 = 2nd u. pte
	sw	s0, UADDR+U_PCB_CONTEXT+0	# do a 'savectx()'
	sw	s1, UADDR+U_PCB_CONTEXT+4	#  We save s0 to s8 here because
	sw	s2, UADDR+U_PCB_CONTEXT+8	#  the TLB trap code uses
	sw	s3, UADDR+U_PCB_CONTEXT+12	#  CONTEXT and there should be
	sw	s4, UADDR+U_PCB_CONTEXT+16	#  no faults at this point.
	sw	s5, UADDR+U_PCB_CONTEXT+20
	sw	s6, UADDR+U_PCB_CONTEXT+24
	sw	s7, UADDR+U_PCB_CONTEXT+28
	sw	s8, UADDR+U_PCB_CONTEXT+36
	or	v0, v0, UADDR			# v0 = first HI entry
/*
 * Resume process indicated by the pte's for its u struct
 * NOTE: This is hard coded to UPAGES == 2.
 * Also, there should be no TLB faults at this point.
 */
	mtc0	zero, MACH_COP_0_TLB_INDEX	# set the index register
	mtc0	v0, MACH_COP_0_TLB_HI		# init high entry
	mtc0	t0, MACH_COP_0_TLB_LOW		# init low entry
	li	t0, 1 << VMMACH_TLB_INDEX_SHIFT
	tlbwi					# Write the TLB entry.
	addu	v0, v0, NBPG			# 2nd HI entry
	mtc0	t0, MACH_COP_0_TLB_INDEX	# set the index register
	mtc0	v0, MACH_COP_0_TLB_HI		# init high entry
	mtc0	t1, MACH_COP_0_TLB_LOW		# init low entry
	nop
	tlbwi					# Write the TLB entry.
/*
 * Now running on new u struct.
 * Restore registers and return.
 */
	lw	v0, UADDR+U_PCB_CONTEXT+44	# restore kernel context
	lw	ra, UADDR+U_PCB_CONTEXT+40
	lw	s0, UADDR+U_PCB_CONTEXT+0
	lw	s1, UADDR+U_PCB_CONTEXT+4
	lw	s2, UADDR+U_PCB_CONTEXT+8
	lw	s3, UADDR+U_PCB_CONTEXT+12
	lw	s4, UADDR+U_PCB_CONTEXT+16
	lw	s5, UADDR+U_PCB_CONTEXT+20
	lw	s6, UADDR+U_PCB_CONTEXT+24
	lw	s7, UADDR+U_PCB_CONTEXT+28
	lw	sp, UADDR+U_PCB_CONTEXT+32
	lw	s8, UADDR+U_PCB_CONTEXT+36
	mtc0	v0, MACH_COP_0_STATUS_REG
	j	ra
	li	v0, 1				# possible return to 'savectx()'
END(cpu_swtch)

/*
 * {fu,su},{ibyte,isword,iword}, fetch or store a byte, short or word to
 * user text space.
 * {fu,su},{byte,sword,word}, fetch or store a byte, short or word to
 * user data space.
 *
 * All of these arm onfault_table[FSWBERR] before touching user memory;
 * a fault sends us to 'fswberr', which returns -1.
 */
LEAF(fuword)
ALEAF(fuiword)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR
	sw	v0, UADDR+U_PCB_ONFAULT
	lw	v0, 0(a0)		# fetch word
	j	ra
	sw	zero, UADDR+U_PCB_ONFAULT
END(fuword)

LEAF(fusword)
ALEAF(fuisword)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR
	sw	v0, UADDR+U_PCB_ONFAULT
	lhu	v0, 0(a0)		# fetch short
	j	ra
	sw	zero, UADDR+U_PCB_ONFAULT
END(fusword)

LEAF(fubyte)
ALEAF(fuibyte)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR
	sw	v0, UADDR+U_PCB_ONFAULT
	lbu	v0, 0(a0)		# fetch byte
	j	ra
	sw	zero, UADDR+U_PCB_ONFAULT
END(fubyte)

LEAF(suword)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR
	sw	v0, UADDR+U_PCB_ONFAULT
	sw	a1, 0(a0)		# store word
	sw	zero, UADDR+U_PCB_ONFAULT
	j	ra
	move	v0, zero
END(suword)

/*
 * Have to flush instruction cache afterwards.
 */
LEAF(suiword)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR
	sw	v0, UADDR+U_PCB_ONFAULT
	sw	a1, 0(a0)		# store word
	sw	zero, UADDR+U_PCB_ONFAULT
	move	v0, zero
	b	MachFlushICache		# NOTE: this should not clobber v0!
					#  (tail call; returns to our caller)
	li	a1, 4			# size of word
END(suiword)

/*
 * Will have to flush the instruction cache if byte merging is done in hardware.
 */
LEAF(susword)
ALEAF(suisword)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR
	sw	v0, UADDR+U_PCB_ONFAULT
	sh	a1, 0(a0)		# store short
	sw	zero, UADDR+U_PCB_ONFAULT
	j	ra
	move	v0, zero
END(susword)

LEAF(subyte)
ALEAF(suibyte)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR
	sw	v0, UADDR+U_PCB_ONFAULT
	sb	a1, 0(a0)		# store byte
	sw	zero, UADDR+U_PCB_ONFAULT
	j	ra
	move	v0, zero
END(subyte)

LEAF(fswberr)
	j	ra
	li	v0, -1
END(fswberr)

/*
 * fuswintr and suswintr are just like fusword and susword except that if
 * the page is not in memory or would cause a trap, then we return an error.
 * The important thing is to prevent sleep() and swtch().
 */
LEAF(fuswintr)
	blt	a0, zero, fswintrberr	# make sure address is in user space
	li	v0, FSWINTRBERR
	sw	v0, UADDR+U_PCB_ONFAULT
	lhu	v0, 0(a0)		# fetch short
	j	ra
	sw	zero, UADDR+U_PCB_ONFAULT
END(fuswintr)

LEAF(suswintr)
	blt	a0, zero, fswintrberr	# make sure address is in user space
	li	v0, FSWINTRBERR
	sw	v0, UADDR+U_PCB_ONFAULT
	sh	a1, 0(a0)		# store short
	sw	zero, UADDR+U_PCB_ONFAULT
	j	ra
	move	v0, zero
END(suswintr)

LEAF(fswintrberr)
	j	ra
	li	v0, -1
END(fswintrberr)

/*
 * Insert 'p' after 'q'.
 *	_insque(p, q)
 *		caddr_t p, q;
 */
LEAF(_insque)
	lw	v0, 0(a1)		# v0 = q->next
	sw	a1, 4(a0)		# p->prev = q
	sw	v0, 0(a0)		# p->next = q->next
	sw	a0, 4(v0)		# q->next->prev = p
	j	ra
	sw	a0, 0(a1)		# q->next = p
END(_insque)

/*
 * Remove item 'p' from queue.
1105 * _remque(p) 1106 * caddr_t p; 1107 */ 1108LEAF(_remque) 1109 lw v0, 0(a0) # v0 = p->next 1110 lw v1, 4(a0) # v1 = p->prev 1111 nop 1112 sw v0, 0(v1) # p->prev->next = p->next 1113 j ra 1114 sw v1, 4(v0) # p->next->prev = p->prev 1115END(_remque) 1116 1117/* 1118 * This code is copied to the UTLB exception vector address to 1119 * handle user level TLB translation misses. 1120 * NOTE: This code must be relocatable!!! 1121 */ 1122 .globl MachUTLBMiss 1123MachUTLBMiss: 1124 .set noat 1125 mfc0 k0, MACH_COP_0_BAD_VADDR # get the virtual address 1126 nop 1127 srl k0, k0, PMAP_HASH_SHIFT1 # get page in low bits 1128 srl k1, k0, PMAP_HASH_SHIFT2 - PMAP_HASH_SHIFT1 1129 and k0, k0, PMAP_HASH_MASK1 1130 and k1, k1, PMAP_HASH_MASK2 1131 or k1, k1, k0 1132 sll k1, k1, PMAP_HASH_SIZE_SHIFT # compute index 1133 lw k0, PMAP_HASH_LOW_OFFSET(k1) # get cached low PTE entry 1134 lw k1, PMAP_HASH_HIGH_OFFSET(k1) # get cached high PTE entry 1135 mtc0 k0, MACH_COP_0_TLB_LOW 1136 mfc0 k0, MACH_COP_0_TLB_HI # get actual high PTE entry 1137 nop 1138 bne k0, k1, 1f # non-matching PTE 1139 mfc0 k0, MACH_COP_0_EXC_PC # get return address 1140 tlbwr # update TLB 1141 j k0 1142 rfe 11431: 1144 j UtlbFault # handle the rest 1145 nop 1146 .set at 1147 .globl MachUTLBMissEnd 1148MachUTLBMissEnd: 1149 1150/* 1151 * This code is copied to the general exception vector address to 1152 * handle all execptions except RESET and UTLBMiss. 1153 * NOTE: This code must be relocatable!!! 1154 */ 1155 .globl MachException 1156MachException: 1157/* 1158 * Find out what mode we came from and jump to the proper handler. 1159 */ 1160 .set noat 1161 mfc0 k0, MACH_COP_0_STATUS_REG # Get the status register 1162 mfc0 k1, MACH_COP_0_CAUSE_REG # Get the cause register value. 1163 and k0, k0, MACH_SR_KU_PREV # test for user mode 1164 sll k0, k0, 3 # shift user bit for cause index 1165 and k1, k1, MACH_CR_EXC_CODE # Mask out the cause bits. 
1166 or k1, k1, k0 # change index to user table 11671: 1168 la k0, machExceptionTable # get base of the jump table 1169 addu k0, k0, k1 # Get the address of the 1170 # function entry. Note that 1171 # the cause is already 1172 # shifted left by 2 bits so 1173 # we dont have to shift. 1174 lw k0, 0(k0) # Get the function address 1175 nop 1176 j k0 # Jump to the function. 1177 nop 1178 .set at 1179 .globl MachExceptionEnd 1180MachExceptionEnd: 1181 1182/* 1183 * Handle the rest of the UTLB miss. 1184 */ 1185UtlbFault: 1186 .set noat 1187 mfc0 k0, MACH_COP_0_BAD_VADDR # get the virtual address 1188 nop 1189 srl k0, k0, PMAP_HASH_SHIFT1 # get page in low bits 1190 srl k1, k0, PMAP_HASH_SHIFT2 - PMAP_HASH_SHIFT1 1191 and k0, k0, PMAP_HASH_MASK1 1192 and k1, k1, PMAP_HASH_MASK2 1193 or k1, k1, k0 1194 sll k1, k1, PMAP_HASH_SIZE_SHIFT # compute index 1195 lw k0, PMAP_HASH_LOW_OFFSET+8(k1) # get cached low PTE entry 1196 lw k1, PMAP_HASH_HIGH_OFFSET+8(k1) # get cached high PTE entry 1197 mtc0 k0, MACH_COP_0_TLB_LOW 1198 mfc0 k0, MACH_COP_0_TLB_HI # get actual high PTE entry 1199 nop 1200 bne k0, k1, SlowFault # non-matching PTE 1201 mfc0 k0, MACH_COP_0_EXC_PC # get return address 1202 tlbwr # update TLB 1203 j k0 1204 rfe 1205/* 1206 * We couldn't find a TLB entry. 1207 * Find out what mode we came from and call the appropriate handler. 1208 */ 1209SlowFault: 1210 mfc0 k0, MACH_COP_0_STATUS_REG 1211 nop 1212 and k0, k0, MACH_SR_KU_PREV 1213 bne k0, zero, MachUserGenException 1214 nop 1215 .set at 1216/* 1217 * Fall though ... 1218 */ 1219 1220/*---------------------------------------------------------------------------- 1221 * 1222 * MachKernGenException -- 1223 * 1224 * Handle an exception from kernel mode. 1225 * 1226 * Results: 1227 * None. 1228 * 1229 * Side effects: 1230 * None. 
 *
 *----------------------------------------------------------------------------
 */

/*
 * The kernel exception stack contains 18 saved general registers,
 * the status register and the multiply lo and high registers.
 * In addition, we set this up for linkage conventions.
 */
#define KERN_REG_SIZE		(18 * 4)
#define KERN_REG_OFFSET		(STAND_FRAME_SIZE)
#define KERN_SR_OFFSET		(STAND_FRAME_SIZE + KERN_REG_SIZE)
#define KERN_MULT_LO_OFFSET	(STAND_FRAME_SIZE + KERN_REG_SIZE + 4)
#define KERN_MULT_HI_OFFSET	(STAND_FRAME_SIZE + KERN_REG_SIZE + 8)
#define KERN_EXC_FRAME_SIZE	(STAND_FRAME_SIZE + KERN_REG_SIZE + 12)

NON_LEAF(MachKernGenException, KERN_EXC_FRAME_SIZE, ra)
	.set	noat
#ifdef KADB
	la	k0, kdbpcb			# save registers for kadb
	sw	s0, (S0 * 4)(k0)
	sw	s1, (S1 * 4)(k0)
	sw	s2, (S2 * 4)(k0)
	sw	s3, (S3 * 4)(k0)
	sw	s4, (S4 * 4)(k0)
	sw	s5, (S5 * 4)(k0)
	sw	s6, (S6 * 4)(k0)
	sw	s7, (S7 * 4)(k0)
	sw	s8, (S8 * 4)(k0)
	sw	gp, (GP * 4)(k0)
	sw	sp, (SP * 4)(k0)
#endif
	subu	sp, sp, KERN_EXC_FRAME_SIZE	# allocate exception frame
	.mask	0x80000000, (STAND_RA_OFFSET - KERN_EXC_FRAME_SIZE)
/*
 * Save the relevant kernel registers onto the stack.
 * We don't need to save s0 - s8, sp and gp because
 * the compiler does it for us.
 * CP0 reads are interleaved with the stores to fill hazard slots.
 */
	sw	AT, KERN_REG_OFFSET + 0(sp)
	sw	v0, KERN_REG_OFFSET + 4(sp)
	sw	v1, KERN_REG_OFFSET + 8(sp)
	sw	a0, KERN_REG_OFFSET + 12(sp)
	mflo	v0				# capture multiply LO
	mfhi	v1				# capture multiply HI
	sw	a1, KERN_REG_OFFSET + 16(sp)
	sw	a2, KERN_REG_OFFSET + 20(sp)
	sw	a3, KERN_REG_OFFSET + 24(sp)
	sw	t0, KERN_REG_OFFSET + 28(sp)
	mfc0	a0, MACH_COP_0_STATUS_REG	# First arg is the status reg.
	sw	t1, KERN_REG_OFFSET + 32(sp)
	sw	t2, KERN_REG_OFFSET + 36(sp)
	sw	t3, KERN_REG_OFFSET + 40(sp)
	sw	t4, KERN_REG_OFFSET + 44(sp)
	mfc0	a1, MACH_COP_0_CAUSE_REG	# Second arg is the cause reg.
	sw	t5, KERN_REG_OFFSET + 48(sp)
	sw	t6, KERN_REG_OFFSET + 52(sp)
	sw	t7, KERN_REG_OFFSET + 56(sp)
	sw	t8, KERN_REG_OFFSET + 60(sp)
	mfc0	a2, MACH_COP_0_BAD_VADDR	# Third arg is the fault addr.
	sw	t9, KERN_REG_OFFSET + 64(sp)
	sw	ra, KERN_REG_OFFSET + 68(sp)
	sw	v0, KERN_MULT_LO_OFFSET(sp)
	sw	v1, KERN_MULT_HI_OFFSET(sp)
	mfc0	a3, MACH_COP_0_EXC_PC		# Fourth arg is the pc.
	sw	a0, KERN_SR_OFFSET(sp)
/*
 * Call the exception handler: trap(statusReg, causeReg, vadr, pc).
 */
	jal	trap
	sw	a3, STAND_RA_OFFSET(sp)		# (branch-delay) for debugging
/*
 * Restore registers and return from the exception.
 * v0 contains the return address (possibly advanced past a
 * faulting branch by trap()).
 */
	lw	a0, KERN_SR_OFFSET(sp)
	lw	t0, KERN_MULT_LO_OFFSET(sp)
	lw	t1, KERN_MULT_HI_OFFSET(sp)
	mtc0	a0, MACH_COP_0_STATUS_REG	# Restore the SR, disable intrs
	mtlo	t0
	mthi	t1
	move	k0, v0				# return address -> k0
	lw	AT, KERN_REG_OFFSET + 0(sp)
	lw	v0, KERN_REG_OFFSET + 4(sp)
	lw	v1, KERN_REG_OFFSET + 8(sp)
	lw	a0, KERN_REG_OFFSET + 12(sp)
	lw	a1, KERN_REG_OFFSET + 16(sp)
	lw	a2, KERN_REG_OFFSET + 20(sp)
	lw	a3, KERN_REG_OFFSET + 24(sp)
	lw	t0, KERN_REG_OFFSET + 28(sp)
	lw	t1, KERN_REG_OFFSET + 32(sp)
	lw	t2, KERN_REG_OFFSET + 36(sp)
	lw	t3, KERN_REG_OFFSET + 40(sp)
	lw	t4, KERN_REG_OFFSET + 44(sp)
	lw	t5, KERN_REG_OFFSET + 48(sp)
	lw	t6, KERN_REG_OFFSET + 52(sp)
	lw	t7, KERN_REG_OFFSET + 56(sp)
	lw	t8, KERN_REG_OFFSET + 60(sp)
	lw	t9, KERN_REG_OFFSET + 64(sp)
	lw	ra, KERN_REG_OFFSET + 68(sp)
	addu	sp, sp, KERN_EXC_FRAME_SIZE	# pop exception frame
	j	k0				# Now return from the
	rfe					# (branch-delay) exception.
	.set	at
END(MachKernGenException)

/*----------------------------------------------------------------------------
 *
 * MachUserGenException --
 *
 *	Handle an exception from user mode.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
NON_LEAF(MachUserGenException, STAND_FRAME_SIZE, ra)
	.set	noat
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
/*
 * Save all of the registers except for the kernel temporaries (k0/k1)
 * in u.u_pcb.  CP0 reads are interleaved with the stores to fill
 * hazard slots.
 */
	sw	AT, UADDR+U_PCB_REGS+(AST * 4)
	sw	v0, UADDR+U_PCB_REGS+(V0 * 4)
	sw	v1, UADDR+U_PCB_REGS+(V1 * 4)
	sw	a0, UADDR+U_PCB_REGS+(A0 * 4)
	mflo	v0				# capture multiply LO
	sw	a1, UADDR+U_PCB_REGS+(A1 * 4)
	sw	a2, UADDR+U_PCB_REGS+(A2 * 4)
	sw	a3, UADDR+U_PCB_REGS+(A3 * 4)
	sw	t0, UADDR+U_PCB_REGS+(T0 * 4)
	mfhi	v1				# capture multiply HI
	sw	t1, UADDR+U_PCB_REGS+(T1 * 4)
	sw	t2, UADDR+U_PCB_REGS+(T2 * 4)
	sw	t3, UADDR+U_PCB_REGS+(T3 * 4)
	sw	t4, UADDR+U_PCB_REGS+(T4 * 4)
	mfc0	a0, MACH_COP_0_STATUS_REG	# First arg is the status reg.
	sw	t5, UADDR+U_PCB_REGS+(T5 * 4)
	sw	t6, UADDR+U_PCB_REGS+(T6 * 4)
	sw	t7, UADDR+U_PCB_REGS+(T7 * 4)
	sw	s0, UADDR+U_PCB_REGS+(S0 * 4)
	mfc0	a1, MACH_COP_0_CAUSE_REG	# Second arg is the cause reg.
	sw	s1, UADDR+U_PCB_REGS+(S1 * 4)
	sw	s2, UADDR+U_PCB_REGS+(S2 * 4)
	sw	s3, UADDR+U_PCB_REGS+(S3 * 4)
	sw	s4, UADDR+U_PCB_REGS+(S4 * 4)
	mfc0	a2, MACH_COP_0_BAD_VADDR	# Third arg is the fault addr
	sw	s5, UADDR+U_PCB_REGS+(S5 * 4)
	sw	s6, UADDR+U_PCB_REGS+(S6 * 4)
	sw	s7, UADDR+U_PCB_REGS+(S7 * 4)
	sw	t8, UADDR+U_PCB_REGS+(T8 * 4)
	mfc0	a3, MACH_COP_0_EXC_PC		# Fourth arg is the pc.
	sw	t9, UADDR+U_PCB_REGS+(T9 * 4)
	sw	gp, UADDR+U_PCB_REGS+(GP * 4)
	sw	sp, UADDR+U_PCB_REGS+(SP * 4)
	sw	s8, UADDR+U_PCB_REGS+(S8 * 4)
	li	sp, KERNELSTACK - STAND_FRAME_SIZE	# switch to kernel SP
	sw	ra, UADDR+U_PCB_REGS+(RA * 4)
	sw	v0, UADDR+U_PCB_REGS+(MULLO * 4)
	sw	v1, UADDR+U_PCB_REGS+(MULHI * 4)
	sw	a0, UADDR+U_PCB_REGS+(SR * 4)
	# la	gp, _gp				# switch to kernel GP
	sw	a3, UADDR+U_PCB_REGS+(PC * 4)
	sw	a3, STAND_RA_OFFSET(sp)		# for debugging
	.set	at
	and	t0, a0, ~MACH_SR_COP_1_BIT	# Turn off the FPU.
	.set	noat
/*
 * Call the exception handler: trap(statusReg, causeReg, vadr, pc).
 */
	jal	trap
	mtc0	t0, MACH_COP_0_STATUS_REG	# (branch-delay) FPU off
/*
 * Restore user registers and return.  NOTE: interrupts are enabled.
 */
	lw	a0, UADDR+U_PCB_REGS+(SR * 4)
	lw	t0, UADDR+U_PCB_REGS+(MULLO * 4)
	lw	t1, UADDR+U_PCB_REGS+(MULHI * 4)
	mtc0	a0, MACH_COP_0_STATUS_REG	# this should disable interrupts
	mtlo	t0
	mthi	t1
	lw	k0, UADDR+U_PCB_REGS+(PC * 4)	# return pc -> k0
	lw	AT, UADDR+U_PCB_REGS+(AST * 4)
	lw	v0, UADDR+U_PCB_REGS+(V0 * 4)
	lw	v1, UADDR+U_PCB_REGS+(V1 * 4)
	lw	a0, UADDR+U_PCB_REGS+(A0 * 4)
	lw	a1, UADDR+U_PCB_REGS+(A1 * 4)
	lw	a2, UADDR+U_PCB_REGS+(A2 * 4)
	lw	a3, UADDR+U_PCB_REGS+(A3 * 4)
	lw	t0, UADDR+U_PCB_REGS+(T0 * 4)
	lw	t1, UADDR+U_PCB_REGS+(T1 * 4)
	lw	t2, UADDR+U_PCB_REGS+(T2 * 4)
	lw	t3, UADDR+U_PCB_REGS+(T3 * 4)
	lw	t4, UADDR+U_PCB_REGS+(T4 * 4)
	lw	t5, UADDR+U_PCB_REGS+(T5 * 4)
	lw	t6, UADDR+U_PCB_REGS+(T6 * 4)
	lw	t7, UADDR+U_PCB_REGS+(T7 * 4)
	lw	s0, UADDR+U_PCB_REGS+(S0 * 4)
	lw	s1, UADDR+U_PCB_REGS+(S1 * 4)
	lw	s2, UADDR+U_PCB_REGS+(S2 * 4)
	lw	s3, UADDR+U_PCB_REGS+(S3 * 4)
	lw	s4, UADDR+U_PCB_REGS+(S4 * 4)
	lw	s5, UADDR+U_PCB_REGS+(S5 * 4)
	lw	s6, UADDR+U_PCB_REGS+(S6 * 4)
	lw	s7, UADDR+U_PCB_REGS+(S7 * 4)
	lw	t8, UADDR+U_PCB_REGS+(T8 * 4)
	lw	t9, UADDR+U_PCB_REGS+(T9 * 4)
	lw	gp, UADDR+U_PCB_REGS+(GP * 4)
	lw	sp, UADDR+U_PCB_REGS+(SP * 4)	# back to the user stack
	lw	s8, UADDR+U_PCB_REGS+(S8 * 4)
	lw	ra, UADDR+U_PCB_REGS+(RA * 4)
	j	k0				# return to user mode
	rfe					# (branch-delay) restore KU/IE
	.set	at
END(MachUserGenException)

/*----------------------------------------------------------------------------
 *
 * MachKernIntr --
 *
 *	Handle an interrupt from kernel mode.
 *	Interrupts must use a separate stack since during exit()
 *	there is a window of time when there is no kernel stack.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
#define KINTR_REG_OFFSET	(STAND_FRAME_SIZE)
#define KINTR_SR_OFFSET		(STAND_FRAME_SIZE + KERN_REG_SIZE)
#define KINTR_SP_OFFSET		(STAND_FRAME_SIZE + KERN_REG_SIZE + 4)
#define KINTR_MULT_LO_OFFSET	(STAND_FRAME_SIZE + KERN_REG_SIZE + 8)
#define KINTR_MULT_HI_OFFSET	(STAND_FRAME_SIZE + KERN_REG_SIZE + 12)
#define KINTR_FRAME_SIZE	(STAND_FRAME_SIZE + KERN_REG_SIZE + 16)

NON_LEAF(MachKernIntr, KINTR_FRAME_SIZE, ra)
	.set	noat
	.mask	0x80000000, (STAND_RA_OFFSET - KINTR_FRAME_SIZE)
/*
 * Check to see if we are already on the interrupt stack
 * (the interrupt stack lives below MACH_CODE_START).
 */
	li	k0, MACH_CODE_START		# interrupt stack below code
	sltu	k1, sp, k0			# k1 = (sp < MACH_CODE_START)
	beq	k1, zero, 1f			# no, init sp
	nop
	sw	sp, KINTR_SP_OFFSET - KINTR_FRAME_SIZE(sp)	# save old sp
	b	2f
	subu	sp, sp, KINTR_FRAME_SIZE	# (branch-delay) allocate stack frame
1:
	sw	sp, KINTR_SP_OFFSET - KINTR_FRAME_SIZE(k0)	# save old sp
	subu	sp, k0, KINTR_FRAME_SIZE	# switch to interrupt stack
2:
/*
 * Save the relevant kernel registers onto the stack.
 * We don't need to save s0 - s8, sp and gp because
 * the compiler does it for us.
 * CP0 reads are interleaved with the stores to fill hazard slots.
 */
	sw	AT, KINTR_REG_OFFSET + 0(sp)
	sw	v0, KINTR_REG_OFFSET + 4(sp)
	sw	v1, KINTR_REG_OFFSET + 8(sp)
	sw	a0, KINTR_REG_OFFSET + 12(sp)
	mflo	v0				# capture multiply LO
	mfhi	v1				# capture multiply HI
	sw	a1, KINTR_REG_OFFSET + 16(sp)
	sw	a2, KINTR_REG_OFFSET + 20(sp)
	sw	a3, KINTR_REG_OFFSET + 24(sp)
	sw	t0, KINTR_REG_OFFSET + 28(sp)
	mfc0	a0, MACH_COP_0_STATUS_REG	# First arg is the status reg.
	sw	t1, KINTR_REG_OFFSET + 32(sp)
	sw	t2, KINTR_REG_OFFSET + 36(sp)
	sw	t3, KINTR_REG_OFFSET + 40(sp)
	sw	t4, KINTR_REG_OFFSET + 44(sp)
	mfc0	a1, MACH_COP_0_CAUSE_REG	# Second arg is the cause reg.
	sw	t5, KINTR_REG_OFFSET + 48(sp)
	sw	t6, KINTR_REG_OFFSET + 52(sp)
	sw	t7, KINTR_REG_OFFSET + 56(sp)
	sw	t8, KINTR_REG_OFFSET + 60(sp)
	mfc0	a2, MACH_COP_0_EXC_PC		# Third arg is the pc.
	sw	t9, KINTR_REG_OFFSET + 64(sp)
	sw	ra, KINTR_REG_OFFSET + 68(sp)
	sw	v0, KINTR_MULT_LO_OFFSET(sp)
	sw	v1, KINTR_MULT_HI_OFFSET(sp)
	sw	a0, KINTR_SR_OFFSET(sp)
/*
 * Call the interrupt handler: interrupt(statusReg, causeReg, pc).
 */
	jal	interrupt
	sw	a2, STAND_RA_OFFSET(sp)		# (branch-delay) for debugging
/*
 * Restore registers and return from the interrupt.
 */
	lw	a0, KINTR_SR_OFFSET(sp)
	lw	t0, KINTR_MULT_LO_OFFSET(sp)
	lw	t1, KINTR_MULT_HI_OFFSET(sp)
	mtc0	a0, MACH_COP_0_STATUS_REG	# Restore the SR, disable intrs
	mtlo	t0
	mthi	t1
	lw	k0, STAND_RA_OFFSET(sp)		# return pc -> k0
	lw	AT, KINTR_REG_OFFSET + 0(sp)
	lw	v0, KINTR_REG_OFFSET + 4(sp)
	lw	v1, KINTR_REG_OFFSET + 8(sp)
	lw	a0, KINTR_REG_OFFSET + 12(sp)
	lw	a1, KINTR_REG_OFFSET + 16(sp)
	lw	a2, KINTR_REG_OFFSET + 20(sp)
	lw	a3, KINTR_REG_OFFSET + 24(sp)
	lw	t0, KINTR_REG_OFFSET + 28(sp)
	lw	t1, KINTR_REG_OFFSET + 32(sp)
	lw	t2, KINTR_REG_OFFSET + 36(sp)
	lw	t3, KINTR_REG_OFFSET + 40(sp)
	lw	t4, KINTR_REG_OFFSET + 44(sp)
	lw	t5, KINTR_REG_OFFSET + 48(sp)
	lw	t6, KINTR_REG_OFFSET + 52(sp)
	lw	t7, KINTR_REG_OFFSET + 56(sp)
	lw	t8, KINTR_REG_OFFSET + 60(sp)
	lw	t9, KINTR_REG_OFFSET + 64(sp)
	lw	ra, KINTR_REG_OFFSET + 68(sp)
	lw	sp, KINTR_SP_OFFSET(sp)		# restore orig sp
	j	k0				# Now return from the
	rfe					# (branch-delay) interrupt.
	.set	at
END(MachKernIntr)

/*----------------------------------------------------------------------------
 *
 * MachUserIntr --
 *
 *	Handle an interrupt from user mode.
 *	Note: we save minimal state in the u.u_pcb struct and use the standard
 *	kernel stack since there has to be a u page if we came from user mode.
 *	If there is a pending software interrupt, then save the remaining state
 *	and call softintr().  This is all because if we call swtch() inside
 *	interrupt(), not all the user registers have been saved in u.u_pcb.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
NON_LEAF(MachUserIntr, STAND_FRAME_SIZE, ra)
	.set	noat
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
/*
 * Save the relevant user registers into the u.u_pcb struct.
 * We don't need to save s0 - s8 because
 * the compiler does it for us (they are only saved later if we
 * have to call softintr()).  CP0 reads are interleaved with the
 * stores to fill hazard slots.
 */
	sw	AT, UADDR+U_PCB_REGS+(AST * 4)
	sw	v0, UADDR+U_PCB_REGS+(V0 * 4)
	sw	v1, UADDR+U_PCB_REGS+(V1 * 4)
	sw	a0, UADDR+U_PCB_REGS+(A0 * 4)
	mflo	v0				# capture multiply LO
	mfhi	v1				# capture multiply HI
	sw	a1, UADDR+U_PCB_REGS+(A1 * 4)
	sw	a2, UADDR+U_PCB_REGS+(A2 * 4)
	sw	a3, UADDR+U_PCB_REGS+(A3 * 4)
	sw	t0, UADDR+U_PCB_REGS+(T0 * 4)
	mfc0	a0, MACH_COP_0_STATUS_REG	# First arg is the status reg.
	sw	t1, UADDR+U_PCB_REGS+(T1 * 4)
	sw	t2, UADDR+U_PCB_REGS+(T2 * 4)
	sw	t3, UADDR+U_PCB_REGS+(T3 * 4)
	sw	t4, UADDR+U_PCB_REGS+(T4 * 4)
	mfc0	a1, MACH_COP_0_CAUSE_REG	# Second arg is the cause reg.
	sw	t5, UADDR+U_PCB_REGS+(T5 * 4)
	sw	t6, UADDR+U_PCB_REGS+(T6 * 4)
	sw	t7, UADDR+U_PCB_REGS+(T7 * 4)
	sw	t8, UADDR+U_PCB_REGS+(T8 * 4)
	mfc0	a2, MACH_COP_0_EXC_PC		# Third arg is the pc.
	sw	t9, UADDR+U_PCB_REGS+(T9 * 4)
	sw	gp, UADDR+U_PCB_REGS+(GP * 4)
	sw	sp, UADDR+U_PCB_REGS+(SP * 4)
	sw	ra, UADDR+U_PCB_REGS+(RA * 4)
	li	sp, KERNELSTACK - STAND_FRAME_SIZE	# switch to kernel SP
	sw	v0, UADDR+U_PCB_REGS+(MULLO * 4)
	sw	v1, UADDR+U_PCB_REGS+(MULHI * 4)
	sw	a0, UADDR+U_PCB_REGS+(SR * 4)
	sw	a2, UADDR+U_PCB_REGS+(PC * 4)
	# la	gp, _gp				# switch to kernel GP
	.set	at
	and	t0, a0, ~MACH_SR_COP_1_BIT	# Turn off the FPU.
	.set	noat
	mtc0	t0, MACH_COP_0_STATUS_REG
/*
 * Call the interrupt handler: interrupt(statusReg, causeReg, pc).
 */
	jal	interrupt
	sw	a2, STAND_RA_OFFSET(sp)		# (branch-delay) for debugging
/*
 * Restore registers and return from the interrupt.
 */
	lw	a0, UADDR+U_PCB_REGS+(SR * 4)
	lw	v0, astpending			# any pending software interrupts?
	mtc0	a0, MACH_COP_0_STATUS_REG	# Restore the SR, disable intrs
	bne	v0, zero, 1f			# dont restore, call softintr
	lw	t0, UADDR+U_PCB_REGS+(MULLO * 4)	# (branch-delay)
	lw	t1, UADDR+U_PCB_REGS+(MULHI * 4)
	lw	k0, UADDR+U_PCB_REGS+(PC * 4)	# return pc -> k0
	lw	AT, UADDR+U_PCB_REGS+(AST * 4)
	lw	v0, UADDR+U_PCB_REGS+(V0 * 4)
	lw	v1, UADDR+U_PCB_REGS+(V1 * 4)
	lw	a0, UADDR+U_PCB_REGS+(A0 * 4)
	lw	a1, UADDR+U_PCB_REGS+(A1 * 4)
	lw	a2, UADDR+U_PCB_REGS+(A2 * 4)
	lw	a3, UADDR+U_PCB_REGS+(A3 * 4)
	mtlo	t0
	mthi	t1
	lw	t0, UADDR+U_PCB_REGS+(T0 * 4)
	lw	t1, UADDR+U_PCB_REGS+(T1 * 4)
	lw	t2, UADDR+U_PCB_REGS+(T2 * 4)
	lw	t3, UADDR+U_PCB_REGS+(T3 * 4)
	lw	t4, UADDR+U_PCB_REGS+(T4 * 4)
	lw	t5, UADDR+U_PCB_REGS+(T5 * 4)
	lw	t6, UADDR+U_PCB_REGS+(T6 * 4)
	lw	t7, UADDR+U_PCB_REGS+(T7 * 4)
	lw	t8, UADDR+U_PCB_REGS+(T8 * 4)
	lw	t9, UADDR+U_PCB_REGS+(T9 * 4)
	lw	gp, UADDR+U_PCB_REGS+(GP * 4)
	lw	sp, UADDR+U_PCB_REGS+(SP * 4)	# back to the user stack
	lw	ra, UADDR+U_PCB_REGS+(RA * 4)
	j	k0				# Now return from the
	rfe					# (branch-delay) interrupt.

1:
/*
 * We have pending software interrupts; save remaining user state in u.u_pcb
 * (s0-s8) so softintr() may safely context switch.
 */
	sw	s0, UADDR+U_PCB_REGS+(S0 * 4)
	sw	s1, UADDR+U_PCB_REGS+(S1 * 4)
	sw	s2, UADDR+U_PCB_REGS+(S2 * 4)
	sw	s3, UADDR+U_PCB_REGS+(S3 * 4)
	sw	s4, UADDR+U_PCB_REGS+(S4 * 4)
	sw	s5, UADDR+U_PCB_REGS+(S5 * 4)
	sw	s6, UADDR+U_PCB_REGS+(S6 * 4)
	sw	s7, UADDR+U_PCB_REGS+(S7 * 4)
	sw	s8, UADDR+U_PCB_REGS+(S8 * 4)
	li	t0, MACH_HARD_INT_MASK | MACH_SR_INT_ENA_CUR
/*
 * Call the software interrupt handler.
 */
	jal	softintr
	mtc0	t0, MACH_COP_0_STATUS_REG	# (branch-delay) enable interrupts (spl0)
/*
 * Restore user registers and return.  NOTE: interrupts are enabled.
 */
	lw	a0, UADDR+U_PCB_REGS+(SR * 4)
	lw	t0, UADDR+U_PCB_REGS+(MULLO * 4)
	lw	t1, UADDR+U_PCB_REGS+(MULHI * 4)
	mtc0	a0, MACH_COP_0_STATUS_REG	# this should disable interrupts
	mtlo	t0
	mthi	t1
	lw	k0, UADDR+U_PCB_REGS+(PC * 4)	# return pc -> k0
	lw	AT, UADDR+U_PCB_REGS+(AST * 4)
	lw	v0, UADDR+U_PCB_REGS+(V0 * 4)
	lw	v1, UADDR+U_PCB_REGS+(V1 * 4)
	lw	a0, UADDR+U_PCB_REGS+(A0 * 4)
	lw	a1, UADDR+U_PCB_REGS+(A1 * 4)
	lw	a2, UADDR+U_PCB_REGS+(A2 * 4)
	lw	a3, UADDR+U_PCB_REGS+(A3 * 4)
	lw	t0, UADDR+U_PCB_REGS+(T0 * 4)
	lw	t1, UADDR+U_PCB_REGS+(T1 * 4)
	lw	t2, UADDR+U_PCB_REGS+(T2 * 4)
	lw	t3, UADDR+U_PCB_REGS+(T3 * 4)
	lw	t4, UADDR+U_PCB_REGS+(T4 * 4)
	lw	t5, UADDR+U_PCB_REGS+(T5 * 4)
	lw	t6, UADDR+U_PCB_REGS+(T6 * 4)
	lw	t7, UADDR+U_PCB_REGS+(T7 * 4)
	lw	s0, UADDR+U_PCB_REGS+(S0 * 4)
	lw	s1, UADDR+U_PCB_REGS+(S1 * 4)
	lw	s2, UADDR+U_PCB_REGS+(S2 * 4)
	lw	s3, UADDR+U_PCB_REGS+(S3 * 4)
	lw	s4, UADDR+U_PCB_REGS+(S4 * 4)
	lw	s5, UADDR+U_PCB_REGS+(S5 * 4)
	lw	s6, UADDR+U_PCB_REGS+(S6 * 4)
	lw	s7, UADDR+U_PCB_REGS+(S7 * 4)
	lw	t8, UADDR+U_PCB_REGS+(T8 * 4)
	lw	t9, UADDR+U_PCB_REGS+(T9 * 4)
	lw	gp, UADDR+U_PCB_REGS+(GP * 4)
	lw	sp, UADDR+U_PCB_REGS+(SP * 4)	# back to the user stack
	lw	s8, UADDR+U_PCB_REGS+(S8 * 4)
	lw	ra, UADDR+U_PCB_REGS+(RA * 4)
	j	k0
	rfe					# (branch-delay) restore KU/IE
	.set	at
END(MachUserIntr)

#if 0
/*----------------------------------------------------------------------------
 *
 * MachTLBModException --
 *
 *	Handle a TLB modified exception.
 *	The BaddVAddr, Context, and EntryHi registers contain the failed
 *	virtual address.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
LEAF(MachTLBModException)
	.set	noat
	tlbp					# find the TLB entry
	mfc0	k0, MACH_COP_0_TLB_LOW		# get the physical address
	mfc0	k1, MACH_COP_0_TLB_INDEX	# check to be sure its valid
	or	k0, k0, VMMACH_TLB_MOD_BIT	# update TLB
	blt	k1, zero, 4f			# not found!!!
	mtc0	k0, MACH_COP_0_TLB_LOW		# (branch-delay) write back low entry
	li	k1, MACH_CACHED_MEMORY_ADDR
	subu	k0, k0, k1			# physical page frame ...
	srl	k0, k0, VMMACH_TLB_PHYS_PAGE_SHIFT	# ... to page number
	la	k1, pmap_attributes
	addu	k0, k0, k1			# index into attribute array
	lbu	k1, 0(k0)			# fetch old value
	nop					# load-delay slot
	or	k1, k1, 1			# set modified bit
	sb	k1, 0(k0)			# save new value
	mfc0	k0, MACH_COP_0_EXC_PC		# get return address
	nop					# CP0 read hazard
	j	k0
	rfe					# (branch-delay) restore pre-exception mode
4:
	break	0				# panic
	.set	at
END(MachTLBModException)
#endif

/*----------------------------------------------------------------------------
 *
 * MachTLBMissException --
 *
 *	Handle a TLB miss exception from kernel mode.
 *	The BaddVAddr, Context, and EntryHi registers contain the failed
 *	virtual address.  Fast-path refill for kseg2 addresses covered
 *	by the kernel PTE hash; everything else goes to
 *	MachKernGenException.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
LEAF(MachTLBMissException)
	.set	noat
	mfc0	k0, MACH_COP_0_BAD_VADDR	# get the fault address
	li	k1, MACH_KSEG2_ADDR		# compute index
	subu	k0, k0, k1
	srl	k0, k0, PGSHIFT
	li	k1, PMAP_HASH_KPAGES * NPTEPG	# index within range?
	sltu	k1, k0, k1
	beq	k1, zero, MachKernGenException	# No. do it the long way
	sll	k0, k0, 2			# (branch-delay) compute offset from index
	li	k1, PMAP_HASH_KADDR
	addu	k0, k0, k1
	lw	k0, 0(k0)			# get PTE entry
	mfc0	k1, MACH_COP_0_EXC_PC		# get return address
	mtc0	k0, MACH_COP_0_TLB_LOW		# save PTE entry
	and	k0, k0, PG_V			# make sure its valid
	beq	k0, zero, MachKernGenException	# No. do it the long way
	nop
	tlbwr					# update TLB
	j	k1
	rfe					# (branch-delay) restore pre-exception mode
	.set	at
END(MachTLBMissException)

/*
 * Set/clear software interrupt routines.
 * These set or clear the soft-interrupt request bits in the CP0
 * cause register; the interrupt is taken when the corresponding
 * mask bit in the status register allows it.
 */

LEAF(setsoftclock)
	mfc0	v0, MACH_COP_0_CAUSE_REG	# read cause register
	nop					# CP0 read hazard
	or	v0, v0, MACH_SOFT_INT_MASK_0	# set soft clock interrupt
	mtc0	v0, MACH_COP_0_CAUSE_REG	# save it
	j	ra
	nop
END(setsoftclock)

LEAF(clearsoftclock)
	mfc0	v0, MACH_COP_0_CAUSE_REG	# read cause register
	nop					# CP0 read hazard
	and	v0, v0, ~MACH_SOFT_INT_MASK_0	# clear soft clock interrupt
	mtc0	v0, MACH_COP_0_CAUSE_REG	# save it
	j	ra
	nop
END(clearsoftclock)

LEAF(setsoftnet)
	mfc0	v0, MACH_COP_0_CAUSE_REG	# read cause register
	nop					# CP0 read hazard
	or	v0, v0, MACH_SOFT_INT_MASK_1	# set soft net interrupt
	mtc0	v0, MACH_COP_0_CAUSE_REG	# save it
	j	ra
	nop
END(setsoftnet)

LEAF(clearsoftnet)
	mfc0	v0, MACH_COP_0_CAUSE_REG	# read cause register
	nop					# CP0 read hazard
	and	v0, v0, ~MACH_SOFT_INT_MASK_1	# clear soft net interrupt
	mtc0	v0, MACH_COP_0_CAUSE_REG	# save it
	j	ra
	nop
END(clearsoftnet)

/*
 * Set/change interrupt priority routines.
 * Each spl routine returns the previous interrupt-relevant bits of
 * the status register (MACH_INT_MASK | MACH_SR_INT_ENA_CUR) in v0
 * so the caller can later restore them with splx().
 */

LEAF(MachEnableIntr)
	mfc0	v0, MACH_COP_0_STATUS_REG	# read status register
	nop					# CP0 read hazard
	or	v0, v0, MACH_SR_INT_ENA_CUR
	mtc0	v0, MACH_COP_0_STATUS_REG	# enable all interrupts
	j	ra
	nop
END(MachEnableIntr)

LEAF(spl0)
	mfc0	v0, MACH_COP_0_STATUS_REG	# read status register
	nop					# CP0 read hazard
	or	t0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR)
	mtc0	t0, MACH_COP_0_STATUS_REG	# enable all interrupts
	j	ra
	and	v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR)	# (branch-delay) old mask
END(spl0)

LEAF(splsoftclock)
	mfc0	v0, MACH_COP_0_STATUS_REG	# read status register
	li	t0, ~MACH_SOFT_INT_MASK_0	# disable soft clock
	and	t0, t0, v0
	mtc0	t0, MACH_COP_0_STATUS_REG	# save it
	j	ra
	and	v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR)	# (branch-delay) old mask
END(splsoftclock)

LEAF(Mach_spl0)
	mfc0	v0, MACH_COP_0_STATUS_REG	# read status register
	li	t0, ~(MACH_INT_MASK_0|MACH_SOFT_INT_MASK_1|MACH_SOFT_INT_MASK_0)
	and	t0, t0, v0
	mtc0	t0, MACH_COP_0_STATUS_REG	# save it
	j	ra
	and	v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR)	# (branch-delay) old mask
END(Mach_spl0)

LEAF(Mach_spl1)
	mfc0	v0, MACH_COP_0_STATUS_REG	# read status register
	li	t0, ~(MACH_INT_MASK_1|MACH_SOFT_INT_MASK_0|MACH_SOFT_INT_MASK_1)
	and	t0, t0, v0
	mtc0	t0, MACH_COP_0_STATUS_REG	# save it
	j	ra
	and	v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR)	# (branch-delay) old mask
END(Mach_spl1)

LEAF(Mach_spl2)
	mfc0	v0, MACH_COP_0_STATUS_REG	# read status register
	li	t0, ~(MACH_INT_MASK_2|MACH_SOFT_INT_MASK_1|MACH_SOFT_INT_MASK_0)
	and	t0, t0, v0
	mtc0	t0, MACH_COP_0_STATUS_REG	# save it
	j	ra
	and	v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR)	# (branch-delay) old mask
END(Mach_spl2)

LEAF(Mach_spl3)
	mfc0	v0, MACH_COP_0_STATUS_REG	# read status register
	li	t0, ~(MACH_INT_MASK_3|MACH_SOFT_INT_MASK_1|MACH_SOFT_INT_MASK_0)
	and	t0, t0, v0
	mtc0	t0, MACH_COP_0_STATUS_REG	# save it
	j	ra
	and	v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR)	# (branch-delay) old mask
END(Mach_spl3)

LEAF(splhigh)
	mfc0	v0, MACH_COP_0_STATUS_REG	# read status register
	li	t0, ~MACH_SR_INT_ENA_CUR	# disable all interrupts
	and	t0, t0, v0
	mtc0	t0, MACH_COP_0_STATUS_REG	# save it
	j	ra
	and	v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR)	# (branch-delay) old mask
END(splhigh)

/*
 * Restore saved interrupt mask.
 *
 * splx(mask)
 *	a0 = value previously returned by an spl routine.
 */
LEAF(splx)
	mfc0	v0, MACH_COP_0_STATUS_REG
	li	t0, ~(MACH_INT_MASK | MACH_SR_INT_ENA_CUR)
	and	t0, t0, v0			# keep non-interrupt bits
	or	t0, t0, a0			# merge in saved mask
	mtc0	t0, MACH_COP_0_STATUS_REG
	j	ra
	nop
END(splx)

/*----------------------------------------------------------------------------
 *
 * MachEmptyWriteBuffer --
 *
 *	Return when the write buffer is empty (spins on the CP0
 *	condition signal via bc0f).
 *
 *	MachEmptyWriteBuffer()
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
LEAF(MachEmptyWriteBuffer)
	nop
	nop
	nop
	nop
1:	bc0f	1b				# loop while write buffer busy
	nop
	j	ra
	nop
END(MachEmptyWriteBuffer)

/*--------------------------------------------------------------------------
 *
 * MachTLBWriteIndexed --
 *
 *	Write the given entry into the TLB at the given index.
 *
 *	MachTLBWriteIndexed(index, highEntry, lowEntry)
 *		int index;
 *		int highEntry;
 *		int lowEntry;
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	TLB entry set.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MachTLBWriteIndexed)
	mfc0	t1, MACH_COP_0_STATUS_REG	# Save the status register.
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts
	mfc0	t0, MACH_COP_0_TLB_HI		# Save the current PID.

	sll	a0, a0, VMMACH_TLB_INDEX_SHIFT
	mtc0	a0, MACH_COP_0_TLB_INDEX	# Set the index.
	mtc0	a1, MACH_COP_0_TLB_HI		# Set up entry high.
	mtc0	a2, MACH_COP_0_TLB_LOW		# Set up entry low.
	nop					# CP0 write hazard before tlbwi
	tlbwi					# Write the TLB

	mtc0	t0, MACH_COP_0_TLB_HI		# Restore the PID.
	j	ra
	mtc0	t1, MACH_COP_0_STATUS_REG	# (branch-delay) Restore the status register
END(MachTLBWriteIndexed)

/*--------------------------------------------------------------------------
 *
 * MachTLBWriteRandom --
 *
 *	Write the given entry into the TLB at a random location.
 *
 *	MachTLBWriteRandom(highEntry, lowEntry)
 *		unsigned highEntry;
 *		unsigned lowEntry;
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	TLB entry set.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MachTLBWriteRandom)
	mfc0	v1, MACH_COP_0_STATUS_REG	# Save the status register.
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts
	mfc0	v0, MACH_COP_0_TLB_HI		# Save the current PID.

	mtc0	a0, MACH_COP_0_TLB_HI		# Set up entry high.
	mtc0	a1, MACH_COP_0_TLB_LOW		# Set up entry low.
	nop					# CP0 write hazard before tlbwr
	tlbwr					# Write the TLB

	mtc0	v0, MACH_COP_0_TLB_HI		# Restore the PID.
	j	ra
	mtc0	v1, MACH_COP_0_STATUS_REG	# (branch-delay) Restore the status register
END(MachTLBWriteRandom)

/*--------------------------------------------------------------------------
 *
 * MachSetPID --
 *
 *	Write the given pid into the TLB pid reg.
 *
 *	MachSetPID(pid)
 *		int pid;
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	PID set in the entry hi register.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MachSetPID)
	sll	a0, a0, VMMACH_TLB_PID_SHIFT	# put PID in right spot
	mtc0	a0, MACH_COP_0_TLB_HI		# Write the hi reg value
	j	ra
	nop
END(MachSetPID)

/*--------------------------------------------------------------------------
 *
 * MachTLBFlush --
 *
 *	Flush the "random" entries from the TLB
 *	(entries from VMMACH_FIRST_RAND_ENTRY up; wired entries below
 *	that are left alone).
 *
 *	MachTLBFlush()
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The TLB is flushed.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MachTLBFlush)
	mfc0	v1, MACH_COP_0_STATUS_REG	# Save the status register.
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts
	mfc0	t0, MACH_COP_0_TLB_HI		# Save the PID
	li	t1, MACH_RESERVED_ADDR		# invalid address
	mtc0	t1, MACH_COP_0_TLB_HI		# Mark entry high as invalid
	mtc0	zero, MACH_COP_0_TLB_LOW	# Zero out low entry.
/*
 * Align the starting value (t1) and the upper bound (t2)
 * to TLB index register positions.
 */
	li	t1, VMMACH_FIRST_RAND_ENTRY << VMMACH_TLB_INDEX_SHIFT
	li	t2, VMMACH_NUM_TLB_ENTRIES << VMMACH_TLB_INDEX_SHIFT
1:
	mtc0	t1, MACH_COP_0_TLB_INDEX	# Set the index register.
	addu	t1, t1, 1 << VMMACH_TLB_INDEX_SHIFT	# Increment index.
	bne	t1, t2, 1b			# NB: always executes next
	tlbwi					# (branch-delay) Write the TLB entry.

	mtc0	t0, MACH_COP_0_TLB_HI		# Restore the PID
	j	ra
	mtc0	v1, MACH_COP_0_STATUS_REG	# (branch-delay) Restore the status register
END(MachTLBFlush)

/*--------------------------------------------------------------------------
 *
 * MachTLBFlushPID --
 *
 *	Flush all entries with the given PID from the TLB.
 *
 *	MachTLBFlushPID(pid)
 *		int pid;
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	All entries corresponding to this PID are flushed.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MachTLBFlushPID)
	mfc0	v1, MACH_COP_0_STATUS_REG	# Save the status register.
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts
	mfc0	t0, MACH_COP_0_TLB_HI		# Save the current PID
	sll	a0, a0, VMMACH_TLB_PID_SHIFT	# Align the pid to flush.
/*
 * Align the starting value (t1) and the upper bound (t2)
 * to TLB index register positions; scan each random entry and
 * invalidate those whose PID matches.
 */
	li	t1, VMMACH_FIRST_RAND_ENTRY << VMMACH_TLB_INDEX_SHIFT
	li	t2, VMMACH_NUM_TLB_ENTRIES << VMMACH_TLB_INDEX_SHIFT
	mtc0	t1, MACH_COP_0_TLB_INDEX	# Set the index register
1:
	addu	t1, t1, 1 << VMMACH_TLB_INDEX_SHIFT	# Increment index.
	tlbr					# Read from the TLB
	mfc0	t4, MACH_COP_0_TLB_HI		# Fetch the hi register.
	nop					# CP0 read hazard
	and	t4, t4, VMMACH_TLB_PID		# compare PIDs
	bne	t4, a0, 2f			# skip if different PID
	li	v0, MACH_RESERVED_ADDR		# (branch-delay) invalid address
	mtc0	v0, MACH_COP_0_TLB_HI		# Mark entry high as invalid
	mtc0	zero, MACH_COP_0_TLB_LOW	# Zero out low entry.
	nop					# CP0 write hazard before tlbwi
	tlbwi					# Write the entry.
2:
	bne	t1, t2, 1b			# more entries to scan?
	mtc0	t1, MACH_COP_0_TLB_INDEX	# (branch-delay) Set the index register

	mtc0	t0, MACH_COP_0_TLB_HI		# restore PID
	j	ra
	mtc0	v1, MACH_COP_0_STATUS_REG	# (branch-delay) Restore the status register
END(MachTLBFlushPID)

/*--------------------------------------------------------------------------
 *
 * MachTLBFlushAddr --
 *
 *	Flush any TLB entries for the given address and TLB PID.
 *
 *	MachTLBFlushAddr(highreg)
 *		unsigned highreg;	(virtual page | PID, EntryHi format)
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The process's page is flushed from the TLB.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MachTLBFlushAddr)
	mfc0	v1, MACH_COP_0_STATUS_REG	# Save the status register.
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts
	mfc0	t0, MACH_COP_0_TLB_HI		# Get current PID

	mtc0	a0, MACH_COP_0_TLB_HI		# look for addr & PID
	nop					# CP0 write hazard before tlbp
	tlbp					# Probe for the entry.
	mfc0	v0, MACH_COP_0_TLB_INDEX	# See what we got
	li	t1, MACH_RESERVED_ADDR		# Load invalid entry.
	bltz	v0, 1f				# index < 0 => !found
	mtc0	t1, MACH_COP_0_TLB_HI		# (branch-delay) Mark entry high as invalid
	mtc0	zero, MACH_COP_0_TLB_LOW	# Zero out low entry.
	nop					# CP0 write hazard before tlbwi
	tlbwi					# invalidate the matched slot
1:
	mtc0	t0, MACH_COP_0_TLB_HI		# restore PID
	j	ra
	mtc0	v1, MACH_COP_0_STATUS_REG	# (branch-delay) Restore the status register
END(MachTLBFlushAddr)

/*--------------------------------------------------------------------------
 *
 * MachTLBUpdate --
 *
 *	Update the TLB if highreg is found; otherwise leave the TLB
 *	unchanged.
 *
 *	MachTLBUpdate(highreg, lowreg)
 *		unsigned highreg, lowreg;	(EntryHi/EntryLo format)
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MachTLBUpdate)
	mfc0	v1, MACH_COP_0_STATUS_REG	# Save the status register.
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts
	mfc0	t0, MACH_COP_0_TLB_HI		# Save current PID

	mtc0	a0, MACH_COP_0_TLB_HI		# init high reg.
	mtc0	a1, MACH_COP_0_TLB_LOW		# init low reg.
	nop					# CP0 write hazard before tlbp
	tlbp					# Probe for the entry.
	mfc0	v0, MACH_COP_0_TLB_INDEX	# See what we got
	nop					# CP0 read hazard
	bltz	v0, 1f				# index < 0 => !found
	nop
	tlbwi					# overwrite the matched slot
1:
	mtc0	t0, MACH_COP_0_TLB_HI		# restore PID
	j	ra
	mtc0	v1, MACH_COP_0_STATUS_REG	# (branch-delay) Restore the status register
END(MachTLBUpdate)

#if defined(DEBUG) || defined(KADB)
/*--------------------------------------------------------------------------
 *
 * MachTLBFind --
 *
 *	Search the TLB for the given entry.
 *
 *	MachTLBFind(hi)
 *		unsigned hi;
 *
 * Results:
 *	Returns a value >= 0 if the entry was found (the index).
 *	Returns a value < 0 if the entry was not found.
 *
 * Side effects:
 *	tlbhi and tlblo will contain the TLB entry found.
 *
 *--------------------------------------------------------------------------
 */
	.comm	tlbhi, 4
	.comm	tlblo, 4
LEAF(MachTLBFind)
	mfc0	v1, MACH_COP_0_STATUS_REG	# Save the status register.
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts
	mfc0	t0, MACH_COP_0_TLB_HI		# Get current PID
	nop
	mtc0	a0, MACH_COP_0_TLB_HI		# Set up entry high.
	nop					# hazard before tlbp
	tlbp					# Probe for the entry.
	mfc0	v0, MACH_COP_0_TLB_INDEX	# See what we got (return value)
	nop
	bltz	v0, 1f				# not found
	nop
	tlbr					# read TLB
	mfc0	t1, MACH_COP_0_TLB_HI		# See what we got
	mfc0	t2, MACH_COP_0_TLB_LOW		# See what we got
	sw	t1, tlbhi			# stash entry for the caller
	sw	t2, tlblo
1:
	mtc0	t0, MACH_COP_0_TLB_HI		# Restore current PID
	j	ra
	mtc0	v1, MACH_COP_0_STATUS_REG	# (delay slot) restore status reg
END(MachTLBFind)

/*--------------------------------------------------------------------------
 *
 * MachTLBRead --
 *
 *	Read the TLB entry.
 *
 *	MachTLBRead(entry)
 *		unsigned entry;
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	tlbhi and tlblo will contain the TLB entry found.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MachTLBRead)
	mfc0	v1, MACH_COP_0_STATUS_REG	# Save the status register.
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts
	mfc0	t0, MACH_COP_0_TLB_HI		# Get current PID

	sll	a0, a0, VMMACH_TLB_INDEX_SHIFT	# entry number -> index field
	mtc0	a0, MACH_COP_0_TLB_INDEX	# Set the index register
	nop					# hazard before tlbr
	tlbr					# Read from the TLB
	mfc0	t3, MACH_COP_0_TLB_HI		# fetch the hi entry
	mfc0	t4, MACH_COP_0_TLB_LOW		# fetch the low entry
	sw	t3, tlbhi
	sw	t4, tlblo

	mtc0	t0, MACH_COP_0_TLB_HI		# restore PID
	j	ra
	mtc0	v1, MACH_COP_0_STATUS_REG	# (delay slot) restore status reg
END(MachTLBRead)

/*--------------------------------------------------------------------------
 *
 * MachTLBGetPID --
 *
 *	MachTLBGetPID()
 *
 * Results:
 *	Returns the current TLB pid reg.
 *
 * Side effects:
 *	None.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MachTLBGetPID)
	mfc0	v0, MACH_COP_0_TLB_HI		# get PID
	nop					# CP0 read hazard
	and	v0, v0, VMMACH_TLB_PID		# mask off PID
	j	ra
	srl	v0, v0, VMMACH_TLB_PID_SHIFT	# (delay slot) right-justify PID
END(MachTLBGetPID)

/*
 * Return the current value of the cause register.
 */
LEAF(MachGetCauseReg)
	mfc0	v0, MACH_COP_0_CAUSE_REG
	j	ra
	nop
END(MachGetCauseReg)
#endif /* DEBUG || KADB */

/*----------------------------------------------------------------------------
 *
 * MachSwitchFPState --
 *
 *	Save the current state into 'from' and restore it from 'to'.
 *
 *	MachSwitchFPState(from, to)
 *		struct proc *from;
 *		struct user *to;
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
/*
 * Register usage: t1 = saved CPU status register, t0 = scratch / FP CSR,
 * a0 = 'from' proc then its pcb pointer, a1 = 'to' user area pointer.
 * The double cfc1 from the FP CSR is the standard R3000 idiom: the first
 * read stalls until all pending FP operations complete, the second
 * fetches the now-stable status.
 */
LEAF(MachSwitchFPState)
	mfc0	t1, MACH_COP_0_STATUS_REG	# Save old SR
	li	t0, MACH_SR_COP_1_BIT		# enable the coprocessor
	mtc0	t0, MACH_COP_0_STATUS_REG

	beq	a0, zero, 1f			# skip save if NULL pointer
	nop
/*
 * First read out the status register to make sure that all FP operations
 * have completed.
 */
	lw	a0, P_ADDR(a0)			# get pointer to pcb for proc
	cfc1	t0, MACH_FPC_CSR		# stall til FP done
	cfc1	t0, MACH_FPC_CSR		# now get status
	li	t3, ~MACH_SR_COP_1_BIT
	lw	t2, U_PCB_REGS+(PS * 4)(a0)	# get CPU status register
	sw	t0, U_PCB_FPREGS+(32 * 4)(a0)	# save FP status
	and	t2, t2, t3			# clear COP_1 enable bit
	sw	t2, U_PCB_REGS+(PS * 4)(a0)	# save new status register
/*
 * Save the floating point registers.
 */
	swc1	$f0, U_PCB_FPREGS+(0 * 4)(a0)
	swc1	$f1, U_PCB_FPREGS+(1 * 4)(a0)
	swc1	$f2, U_PCB_FPREGS+(2 * 4)(a0)
	swc1	$f3, U_PCB_FPREGS+(3 * 4)(a0)
	swc1	$f4, U_PCB_FPREGS+(4 * 4)(a0)
	swc1	$f5, U_PCB_FPREGS+(5 * 4)(a0)
	swc1	$f6, U_PCB_FPREGS+(6 * 4)(a0)
	swc1	$f7, U_PCB_FPREGS+(7 * 4)(a0)
	swc1	$f8, U_PCB_FPREGS+(8 * 4)(a0)
	swc1	$f9, U_PCB_FPREGS+(9 * 4)(a0)
	swc1	$f10, U_PCB_FPREGS+(10 * 4)(a0)
	swc1	$f11, U_PCB_FPREGS+(11 * 4)(a0)
	swc1	$f12, U_PCB_FPREGS+(12 * 4)(a0)
	swc1	$f13, U_PCB_FPREGS+(13 * 4)(a0)
	swc1	$f14, U_PCB_FPREGS+(14 * 4)(a0)
	swc1	$f15, U_PCB_FPREGS+(15 * 4)(a0)
	swc1	$f16, U_PCB_FPREGS+(16 * 4)(a0)
	swc1	$f17, U_PCB_FPREGS+(17 * 4)(a0)
	swc1	$f18, U_PCB_FPREGS+(18 * 4)(a0)
	swc1	$f19, U_PCB_FPREGS+(19 * 4)(a0)
	swc1	$f20, U_PCB_FPREGS+(20 * 4)(a0)
	swc1	$f21, U_PCB_FPREGS+(21 * 4)(a0)
	swc1	$f22, U_PCB_FPREGS+(22 * 4)(a0)
	swc1	$f23, U_PCB_FPREGS+(23 * 4)(a0)
	swc1	$f24, U_PCB_FPREGS+(24 * 4)(a0)
	swc1	$f25, U_PCB_FPREGS+(25 * 4)(a0)
	swc1	$f26, U_PCB_FPREGS+(26 * 4)(a0)
	swc1	$f27, U_PCB_FPREGS+(27 * 4)(a0)
	swc1	$f28, U_PCB_FPREGS+(28 * 4)(a0)
	swc1	$f29, U_PCB_FPREGS+(29 * 4)(a0)
	swc1	$f30, U_PCB_FPREGS+(30 * 4)(a0)
	swc1	$f31, U_PCB_FPREGS+(31 * 4)(a0)

1:
/*
 * Restore the floating point registers.
 */
	lw	t0, U_PCB_FPREGS+(32 * 4)(a1)	# get status register
	lwc1	$f0, U_PCB_FPREGS+(0 * 4)(a1)
	lwc1	$f1, U_PCB_FPREGS+(1 * 4)(a1)
	lwc1	$f2, U_PCB_FPREGS+(2 * 4)(a1)
	lwc1	$f3, U_PCB_FPREGS+(3 * 4)(a1)
	lwc1	$f4, U_PCB_FPREGS+(4 * 4)(a1)
	lwc1	$f5, U_PCB_FPREGS+(5 * 4)(a1)
	lwc1	$f6, U_PCB_FPREGS+(6 * 4)(a1)
	lwc1	$f7, U_PCB_FPREGS+(7 * 4)(a1)
	lwc1	$f8, U_PCB_FPREGS+(8 * 4)(a1)
	lwc1	$f9, U_PCB_FPREGS+(9 * 4)(a1)
	lwc1	$f10, U_PCB_FPREGS+(10 * 4)(a1)
	lwc1	$f11, U_PCB_FPREGS+(11 * 4)(a1)
	lwc1	$f12, U_PCB_FPREGS+(12 * 4)(a1)
	lwc1	$f13, U_PCB_FPREGS+(13 * 4)(a1)
	lwc1	$f14, U_PCB_FPREGS+(14 * 4)(a1)
	lwc1	$f15, U_PCB_FPREGS+(15 * 4)(a1)
	lwc1	$f16, U_PCB_FPREGS+(16 * 4)(a1)
	lwc1	$f17, U_PCB_FPREGS+(17 * 4)(a1)
	lwc1	$f18, U_PCB_FPREGS+(18 * 4)(a1)
	lwc1	$f19, U_PCB_FPREGS+(19 * 4)(a1)
	lwc1	$f20, U_PCB_FPREGS+(20 * 4)(a1)
	lwc1	$f21, U_PCB_FPREGS+(21 * 4)(a1)
	lwc1	$f22, U_PCB_FPREGS+(22 * 4)(a1)
	lwc1	$f23, U_PCB_FPREGS+(23 * 4)(a1)
	lwc1	$f24, U_PCB_FPREGS+(24 * 4)(a1)
	lwc1	$f25, U_PCB_FPREGS+(25 * 4)(a1)
	lwc1	$f26, U_PCB_FPREGS+(26 * 4)(a1)
	lwc1	$f27, U_PCB_FPREGS+(27 * 4)(a1)
	lwc1	$f28, U_PCB_FPREGS+(28 * 4)(a1)
	lwc1	$f29, U_PCB_FPREGS+(29 * 4)(a1)
	lwc1	$f30, U_PCB_FPREGS+(30 * 4)(a1)
	lwc1	$f31, U_PCB_FPREGS+(31 * 4)(a1)

	and	t0, t0, ~MACH_FPC_EXCEPTION_BITS	# don't re-raise old FP traps
	ctc1	t0, MACH_FPC_CSR
	nop

	mtc0	t1, MACH_COP_0_STATUS_REG	# Restore the status register.
	j	ra
	nop
END(MachSwitchFPState)

/*----------------------------------------------------------------------------
 *
 * MachSaveCurFPState --
 *
 *	Save the current floating point coprocessor state.
 *
 *	MachSaveCurFPState(p)
 *		struct proc *p;
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	machFPCurProcPtr is cleared.
 *
 *----------------------------------------------------------------------------
 */
LEAF(MachSaveCurFPState)
	lw	a0, P_ADDR(a0)			# get pointer to pcb for proc
	mfc0	t1, MACH_COP_0_STATUS_REG	# Disable interrupts and
	li	t0, MACH_SR_COP_1_BIT		# enable the coprocessor
	mtc0	t0, MACH_COP_0_STATUS_REG
	sw	zero, machFPCurProcPtr		# indicate state has been saved
/*
 * First read out the status register to make sure that all FP operations
 * have completed.
 */
	lw	t2, U_PCB_REGS+(PS * 4)(a0)	# get CPU status register
	li	t3, ~MACH_SR_COP_1_BIT
	and	t2, t2, t3			# clear COP_1 enable bit
	cfc1	t0, MACH_FPC_CSR		# stall til FP done
	cfc1	t0, MACH_FPC_CSR		# now get status
	sw	t2, U_PCB_REGS+(PS * 4)(a0)	# save new status register
	sw	t0, U_PCB_FPREGS+(32 * 4)(a0)	# save FP status
/*
 * Save the floating point registers.
 */
	swc1	$f0, U_PCB_FPREGS+(0 * 4)(a0)
	swc1	$f1, U_PCB_FPREGS+(1 * 4)(a0)
	swc1	$f2, U_PCB_FPREGS+(2 * 4)(a0)
	swc1	$f3, U_PCB_FPREGS+(3 * 4)(a0)
	swc1	$f4, U_PCB_FPREGS+(4 * 4)(a0)
	swc1	$f5, U_PCB_FPREGS+(5 * 4)(a0)
	swc1	$f6, U_PCB_FPREGS+(6 * 4)(a0)
	swc1	$f7, U_PCB_FPREGS+(7 * 4)(a0)
	swc1	$f8, U_PCB_FPREGS+(8 * 4)(a0)
	swc1	$f9, U_PCB_FPREGS+(9 * 4)(a0)
	swc1	$f10, U_PCB_FPREGS+(10 * 4)(a0)
	swc1	$f11, U_PCB_FPREGS+(11 * 4)(a0)
	swc1	$f12, U_PCB_FPREGS+(12 * 4)(a0)
	swc1	$f13, U_PCB_FPREGS+(13 * 4)(a0)
	swc1	$f14, U_PCB_FPREGS+(14 * 4)(a0)
	swc1	$f15, U_PCB_FPREGS+(15 * 4)(a0)
	swc1	$f16, U_PCB_FPREGS+(16 * 4)(a0)
	swc1	$f17, U_PCB_FPREGS+(17 * 4)(a0)
	swc1	$f18, U_PCB_FPREGS+(18 * 4)(a0)
	swc1	$f19, U_PCB_FPREGS+(19 * 4)(a0)
	swc1	$f20, U_PCB_FPREGS+(20 * 4)(a0)
	swc1	$f21, U_PCB_FPREGS+(21 * 4)(a0)
	swc1	$f22, U_PCB_FPREGS+(22 * 4)(a0)
	swc1	$f23, U_PCB_FPREGS+(23 * 4)(a0)
	swc1	$f24, U_PCB_FPREGS+(24 * 4)(a0)
	swc1	$f25, U_PCB_FPREGS+(25 * 4)(a0)
	swc1	$f26, U_PCB_FPREGS+(26 * 4)(a0)
	swc1	$f27, U_PCB_FPREGS+(27 * 4)(a0)
	swc1	$f28, U_PCB_FPREGS+(28 * 4)(a0)
	swc1	$f29, U_PCB_FPREGS+(29 * 4)(a0)
	swc1	$f30, U_PCB_FPREGS+(30 * 4)(a0)
	swc1	$f31, U_PCB_FPREGS+(31 * 4)(a0)

	mtc0	t1, MACH_COP_0_STATUS_REG	# Restore the status register.
	j	ra
	nop
END(MachSaveCurFPState)

/*----------------------------------------------------------------------------
 *
 * MachFPInterrupt --
 *
 *	Handle a floating point interrupt.
 *
 *	MachFPInterrupt(statusReg, causeReg, pc)
 *		unsigned statusReg;
 *		unsigned causeReg;
 *		unsigned pc;
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
NON_LEAF(MachFPInterrupt, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	mfc0	t0, MACH_COP_0_STATUS_REG
	sw	ra, STAND_RA_OFFSET(sp)
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)

	or	t1, t0, MACH_SR_COP_1_BIT	# temporarily enable CP1
	mtc0	t1, MACH_COP_0_STATUS_REG
	nop
	nop
	cfc1	t1, MACH_FPC_CSR		# stall til FP done
	cfc1	t1, MACH_FPC_CSR		# now get status
	nop
	sll	t2, t1, (31 - 17)		# unimplemented operation?
	bgez	t2, 3f				# no, normal trap
	nop
/*
 * We got an unimplemented operation trap so
 * fetch the instruction, compute the next PC and emulate the instruction.
 */
	bgez	a1, 1f				# Check the branch delay bit.
	nop
/*
 * The instruction is in the branch delay slot so the branch will have to
 * be emulated to get the resulting PC.
 */
	sw	a2, STAND_FRAME_SIZE + 8(sp)	# save exception PC across call
	li	a0, UADDR+U_PCB_REGS		# first arg is ptr to CPU registers
	move	a1, a2				# second arg is instruction PC
	move	a2, t1				# third arg is floating point CSR
	jal	MachEmulateBranch		# compute PC after branch
	move	a3, zero			# (delay slot) fourth arg is FALSE
/*
 * Now load the floating-point instruction in the branch delay slot
 * to be emulated.
 */
	lw	a2, STAND_FRAME_SIZE + 8(sp)	# restore EXC pc
	b	2f
	lw	a0, 4(a2)			# (delay slot) a0 = coproc instruction
/*
 * This is not in the branch delay slot so calculate the resulting
 * PC (epc + 4) into v0 and continue to MachEmulateFP().
 */
1:
	lw	a0, 0(a2)			# a0 = coproc instruction
	addu	v0, a2, 4			# v0 = next pc
2:
	sw	v0, UADDR+U_PCB_REGS+(PC * 4)	# save new pc
/*
 * Check to see if the instruction to be emulated is a floating-point
 * instruction.
 */
	srl	a3, a0, MACH_OPCODE_SHIFT
	beq	a3, MACH_OPCODE_C1, 4f		# this should never fail
						# NOTE(review): the following
						# lw sits in this beq's delay
						# slot and so clobbers a0 even
						# when the branch to 4f is
						# taken -- looks like it loses
						# the instruction word before
						# MachEmulateFP; confirm.
/*
 * Send a floating point exception signal to the current process.
 */
3:
	lw	a0, curproc			# get current process
	cfc1	a2, MACH_FPC_CSR		# code = FP exceptions
	ctc1	zero, MACH_FPC_CSR		# Clear exceptions
	jal	trapsignal
	li	a1, SIGFPE			# (delay slot) signal number
	b	FPReturn
	nop

/*
 * Finally, we can call MachEmulateFP() where a0 is the instruction to emulate.
 */
4:
	jal	MachEmulateFP
	nop

/*
 * Turn off the floating point coprocessor and return.
 */
FPReturn:
	mfc0	t0, MACH_COP_0_STATUS_REG
	lw	ra, STAND_RA_OFFSET(sp)
	and	t0, t0, ~MACH_SR_COP_1_BIT
	mtc0	t0, MACH_COP_0_STATUS_REG
	j	ra
	addu	sp, sp, STAND_FRAME_SIZE	# (delay slot) pop frame
END(MachFPInterrupt)

/*----------------------------------------------------------------------------
 *
 * MachConfigCache --
 *
 *	Size the caches.
 *	NOTE: should only be called from mach_init().
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The size of the data cache is stored into machDataCacheSize and the
 *	size of instruction cache is stored into machInstCacheSize.
 *
 *----------------------------------------------------------------------------
 */
NON_LEAF(MachConfigCache, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	sw	ra, STAND_RA_OFFSET(sp)		# Save return address.
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts.
	la	v0, 1f
	or	v0, MACH_UNCACHED_MEMORY_ADDR	# Run uncached.
	j	v0
	nop
1:
/*
 * This works because jal doesn't change pc[31..28] and the
 * linker still thinks SizeCache is in the cached region so it computes
 * the correct address without complaining.
 */
	jal	SizeCache			# Get the size of the d-cache.
	nop
	sw	v0, machDataCacheSize
	nop					# Make sure sw out of pipe
	nop
	nop
	nop
	li	v0, MACH_SR_SWAP_CACHES		# Swap caches
	mtc0	v0, MACH_COP_0_STATUS_REG
	nop					# Insure caches stable
	nop
	nop
	nop
	jal	SizeCache			# Get the size of the i-cache.
	nop
	mtc0	zero, MACH_COP_0_STATUS_REG	# Swap back caches and enable.
	nop
	nop
	nop
	nop
	sw	v0, machInstCacheSize
	la	t0, 1f
	j	t0				# Back to cached mode
	nop
1:
	lw	ra, STAND_RA_OFFSET(sp)		# Restore return addr
	addu	sp, sp, STAND_FRAME_SIZE	# Restore sp.
	j	ra
	nop
END(MachConfigCache)

/*----------------------------------------------------------------------------
 *
 * SizeCache --
 *
 *	Get the size of the cache.
 *	Strategy: with the cache isolated, clear one word at every
 *	power-of-two offset, store a marker at offset 0, then scan the
 *	power-of-two offsets for the marker: the cache wraps at its size,
 *	so the first offset that aliases to 0 is the cache size.
 *
 * Results:
 *	The size of the cache (in v0; 0 if no cache found).
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
LEAF(SizeCache)
	mfc0	t0, MACH_COP_0_STATUS_REG	# Save the current status reg.
	nop
	or	v0, t0, MACH_SR_ISOL_CACHES	# Isolate the caches.
	nop					# Make sure no stores in pipe
	mtc0	v0, MACH_COP_0_STATUS_REG
	nop					# Make sure isolated
	nop
	nop
/*
 * Clear cache size boundaries.
 */
	li	v0, MACH_MIN_CACHE_SIZE
	li	v1, MACH_CACHED_MEMORY_ADDR
	li	t2, MACH_MAX_CACHE_SIZE
1:
	addu	t1, v0, v1			# Compute address to clear
	sw	zero, 0(t1)			# Clear cache memory
	bne	v0, t2, 1b
	sll	v0, v0, 1			# (delay slot) offset *= 2

	li	v0, -1
	sw	v0, 0(v1)			# Store marker in cache
	li	v0, MACH_MIN_CACHE_SIZE
2:
	addu	t1, v0, v1			# Compute address
	lw	t3, 0(t1)			# Look for marker
	nop
	bne	t3, zero, 3f			# Found marker.
	nop
	bne	v0, t2, 2b			# keep looking
	sll	v0, v0, 1			# (delay slot) cache size * 2

	move	v0, zero			# must be no cache
3:
	mtc0	t0, MACH_COP_0_STATUS_REG
	nop					# Make sure unisolated
	nop
	nop
	nop
	j	ra
	nop
END(SizeCache)

/*----------------------------------------------------------------------------
 *
 * MachFlushCache --
 *
 *	Flush the caches.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The contents of the caches is flushed.
 *
 *----------------------------------------------------------------------------
 */
LEAF(MachFlushCache)
	lw	t1, machInstCacheSize		# Must load before isolating
	lw	t2, machDataCacheSize		# Must load before isolating
	mfc0	t3, MACH_COP_0_STATUS_REG	# Save the status register.
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts.
	la	v0, 1f
	or	v0, MACH_UNCACHED_MEMORY_ADDR	# Run uncached.
	j	v0
	nop
/*
 * Flush the instruction cache.
 */
1:
	li	v0, MACH_SR_ISOL_CACHES | MACH_SR_SWAP_CACHES
	mtc0	v0, MACH_COP_0_STATUS_REG	# Isolate and swap caches.
	li	t0, MACH_UNCACHED_MEMORY_ADDR
	subu	t0, t0, t1			# start = end - icache size
	li	t1, MACH_UNCACHED_MEMORY_ADDR	# t1 = loop end address
	la	v0, 1f				# Run cached
	j	v0
	nop
1:
	addu	t0, t0, 4
	bne	t0, t1, 1b
	sb	zero, -4(t0)			# (delay slot) invalidate word

	la	v0, 1f
	or	v0, MACH_UNCACHED_MEMORY_ADDR
	j	v0				# Run uncached
	nop
/*
 * Flush the data cache.
 */
1:
	li	v0, MACH_SR_ISOL_CACHES
	mtc0	v0, MACH_COP_0_STATUS_REG	# Isolate and swap back caches
	li	t0, MACH_UNCACHED_MEMORY_ADDR
	subu	t0, t0, t2			# start = end - dcache size
	la	v0, 1f
	j	v0				# Back to cached mode
	nop
1:
	addu	t0, t0, 4
	bne	t0, t1, 1b			# t1 still = end address
	sb	zero, -4(t0)			# (delay slot) invalidate word

	nop					# Insure isolated stores
	nop					# out of pipe.
	nop
	nop
	mtc0	t3, MACH_COP_0_STATUS_REG	# Restore status reg.
	nop					# Insure cache unisolated.
	nop
	nop
	nop
	j	ra
	nop
END(MachFlushCache)

/*----------------------------------------------------------------------------
 *
 * MachFlushICache --
 *
 *	void MachFlushICache(addr, len)
 *		vm_offset_t addr, len;
 *
 *	Flush instruction cache for range of addr to addr + len - 1.
 *	The address can be any valid address so long as no TLB misses occur.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The contents of the cache is flushed.
 *
 *----------------------------------------------------------------------------
 */
LEAF(MachFlushICache)
	mfc0	t0, MACH_COP_0_STATUS_REG	# Save SR
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts.

	la	v1, 1f
	or	v1, MACH_UNCACHED_MEMORY_ADDR	# Run uncached.
	j	v1
	nop
1:
	bc0f	1b				# make sure stores are complete
	li	v1, MACH_SR_ISOL_CACHES | MACH_SR_SWAP_CACHES
	mtc0	v1, MACH_COP_0_STATUS_REG	# isolate i-cache (swapped in)
	nop
	addu	a1, a1, a0			# compute ending address
1:
	addu	a0, a0, 4
	bne	a0, a1, 1b
	sb	zero, -4(a0)			# (delay slot) invalidate word

	mtc0	t0, MACH_COP_0_STATUS_REG	# enable interrupts
	j	ra				# return and run cached
	nop
END(MachFlushICache)

/*----------------------------------------------------------------------------
 *
 * MachFlushDCache --
 *
 *	void MachFlushDCache(addr, len)
 *		vm_offset_t addr, len;
 *
 *	Flush data cache for range of addr to addr + len - 1.
 *	The address can be any valid address so long as no TLB misses occur.
 *	(Be sure to use cached K0SEG kernel addresses)
 * Results:
 *	None.
 *
 * Side effects:
 *	The contents of the cache is flushed.
 *
 *----------------------------------------------------------------------------
 */
LEAF(MachFlushDCache)
	mfc0	t0, MACH_COP_0_STATUS_REG	# Save SR
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts.

	la	v1, 1f
	or	v1, MACH_UNCACHED_MEMORY_ADDR	# Run uncached.
	j	v1
	nop
1:
	bc0f	1b				# make sure stores are complete
	li	v1, MACH_SR_ISOL_CACHES
	mtc0	v1, MACH_COP_0_STATUS_REG	# isolate d-cache (not swapped)
	nop
	addu	a1, a1, a0			# compute ending address
1:
	addu	a0, a0, 4
	bne	a0, a1, 1b
	sb	zero, -4(a0)			# (delay slot) invalidate word

	mtc0	t0, MACH_COP_0_STATUS_REG	# enable interrupts
	j	ra				# return and run cached
	nop
END(MachFlushDCache)

#ifdef KADB
/*
 * Read a long and return it.
 * Note: addresses can be unaligned!
 * On a fault, the trap handler jumps to kadberr via pcb_onfault.
 *
 * long
L* kdbpeek(addr)
L* caddr_t addr;
L* {
L*	return (*(long *)addr);
L* }
 */
LEAF(kdbpeek)
	li	v0, KADBERR
	sw	v0, UADDR+U_PCB_ONFAULT		# arm fault handler
	and	v0, a0, 3			# unaligned address?
	bne	v0, zero, 1f
	nop
	b	2f
	lw	v0, (a0)			# (delay slot) aligned access
1:
	lwr	v0, 0(a0)			# get next 4 bytes (unaligned)
	lwl	v0, 3(a0)
2:
	j	ra				# made it w/o errors
	sw	zero, UADDR+U_PCB_ONFAULT	# (delay slot) disarm handler
kadberr:
	li	v0, 1				# trap sends us here
	sw	v0, kdbmkfault
	j	ra
	nop
END(kdbpeek)

/*
 * Write a long to 'addr'.
 * Note: addresses can be unaligned!
 *
L* void
L* kdbpoke(addr, value)
L* caddr_t addr;
L* long value;
L* {
L*	*(long *)addr = value;
L* }
 */
LEAF(kdbpoke)
	li	v0, KADBERR
	sw	v0, UADDR+U_PCB_ONFAULT		# arm fault handler
	and	v0, a0, 3			# unaligned address?
	bne	v0, zero, 1f
	nop
	b	2f
	sw	a1, (a0)			# (delay slot) aligned access
1:
	swr	a1, 0(a0)			# store next 4 bytes (unaligned)
	swl	a1, 3(a0)
	and	a0, a0, ~3			# align address for cache flush
2:
	sw	zero, UADDR+U_PCB_ONFAULT	# disarm handler
	b	MachFlushICache			# tail call: flush i-cache
	li	a1, 8				# (delay slot) flush 8 bytes
END(kdbpoke)

/*
 * Save registers and state so we can do a 'kdbreset' (like longjmp) later.
 * Always returns zero.
 *
L* int kdb_savearea[11];
L*
L* int
L* kdbsetexit()
L* {
L*	kdb_savearea[0] = 0;
L*	return (0);
L* }
 */
	.comm	kdb_savearea, (11 * 4)

/* Save callee-saved registers s0-s7, sp, s8 and ra (setjmp-style). */
LEAF(kdbsetexit)
	la	a0, kdb_savearea
	sw	s0, 0(a0)
	sw	s1, 4(a0)
	sw	s2, 8(a0)
	sw	s3, 12(a0)
	sw	s4, 16(a0)
	sw	s5, 20(a0)
	sw	s6, 24(a0)
	sw	s7, 28(a0)
	sw	sp, 32(a0)
	sw	s8, 36(a0)
	sw	ra, 40(a0)
	j	ra
	move	v0, zero			# (delay slot) return 0
END(kdbsetexit)

/*
 * Restore registers and state (like longjmp) and return x.
 *
L* int
L* kdbreset(x)
L* {
L*	return (x);
L* }
 */
LEAF(kdbreset)
	la	v0, kdb_savearea
	lw	ra, 40(v0)			# return to kdbsetexit's caller
	lw	s0, 0(v0)
	lw	s1, 4(v0)
	lw	s2, 8(v0)
	lw	s3, 12(v0)
	lw	s4, 16(v0)
	lw	s5, 20(v0)
	lw	s6, 24(v0)
	lw	s7, 28(v0)
	lw	sp, 32(v0)
	lw	s8, 36(v0)
	j	ra
	move	v0, a0				# (delay slot) return x
END(kdbreset)

/*
 * Trap into the debugger.
 *
L* void
L* kdbpanic()
L* {
L* }
 */
LEAF(kdbpanic)
	break	MACH_BREAK_KDB_VAL
	j	ra
	nop
END(kdbpanic)
#endif /* KADB */

#ifdef DEBUG
/* Dump sp, pc, ra, a0-a3 and s8 into the array at a0. */
LEAF(cpu_getregs)				/* XXX */
	sw	sp, 0(a0)
	la	v0, cpu_getregs
	sw	v0, 4(a0)
	sw	ra, 8(a0)
	sw	a0, 12(a0)
	sw	a1, 16(a0)
	sw	a2, 20(a0)
	sw	a3, 24(a0)
	j	ra
	sw	s8, 28(a0)			# (delay slot)
END(cpu_getregs)
#endif /* DEBUG */

/*
 * Interrupt counters.
 * NOTE(review): the names below ("hil", "dma", ...) look inherited from
 * another port -- verify they match this machine's interrupt assignments.
 */
	.data
	.globl	intrcnt, eintrcnt, intrnames, eintrnames
intrnames:
	.asciz	"spur"
	.asciz	"hil"
	.asciz	"lev2"
	.asciz	"lev3"
	.asciz	"lev4"
	.asciz	"lev5"
	.asciz	"dma"
	.asciz	"clock"
	.asciz	"statclock"
	.asciz	"nmi"
eintrnames:
	.align	2
intrcnt:
	.word	0,0,0,0,0,0,0,0,0,0
eintrcnt: