/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Digital Equipment Corporation and Ralph Campbell.
 *
 * %sccs.include.redist.c%
 *
 * Copyright (C) 1989 Digital Equipment Corporation.
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies.
 * Digital Equipment Corporation makes no representations about the
 * suitability of this software for any purpose.  It is provided "as is"
 * without express or implied warranty.
 *
 * from: $Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
 *	v 1.1 89/07/11 17:55:04 nelson Exp $ SPRITE (DECWRL)
 * from: $Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
 *	v 9.2 90/01/29 18:00:39 shirriff Exp $ SPRITE (DECWRL)
 * from: $Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
 *	v 1.1 89/07/10 14:27:41 nelson Exp $ SPRITE (DECWRL)
 *
 *	@(#)locore.s	8.3 (Berkeley) 09/23/93
 */

/*
 * Contains code that is the first executed at boot time plus
 * assembly language support routines.
 *
 * NOTE: the whole file is assembled ".set noreorder" -- the assembler
 * does NOT fill branch/load delay slots; the instruction after every
 * branch/jump below is its delay slot and executes before the branch
 * takes effect.
 */

#include <sys/errno.h>
#include <sys/syscall.h>

#include <machine/param.h>
#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/machAsmDefs.h>
#include <machine/pte.h>

#include "assym.h"

	.set	noreorder

/*
 * Amount to take off of the stack for the benefit of the debugger.
 */
#define START_FRAME	((4 * 4) + 4 + 4)

	.globl	start
start:
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts
	li	t1, MACH_CACHED_MEMORY_ADDR	# invalid address
	mtc0	t1, MACH_COP_0_TLB_HI		# Mark entry high as invalid
	mtc0	zero, MACH_COP_0_TLB_LOW	# Zero out low entry.
/*
 * Clear the TLB (just to be safe).
 * Align the starting value (t1), the increment (t2) and the upper bound (t3).
 */
	move	t1, zero
	li	t2, 1 << VMMACH_TLB_INDEX_SHIFT
	li	t3, VMMACH_NUM_TLB_ENTRIES << VMMACH_TLB_INDEX_SHIFT
1:
	mtc0	t1, MACH_COP_0_TLB_INDEX	# Set the index register.
	addu	t1, t1, t2			# Increment index.
	bne	t1, t3, 1b			# NB: always executes next
	tlbwi					# Write the TLB entry (delay slot).

	la	sp, start - START_FRAME		# temporary stack below 'start'
#	la	gp, _gp
	sw	zero, START_FRAME - 4(sp)	# Zero out old ra for debugger
	jal	mach_init			# mach_init(argc, argv, envp)
	sw	zero, START_FRAME - 8(sp)	# Zero out old fp for debugger

	li	t0, MACH_SR_COP_1_BIT		# Disable interrupts and
	mtc0	t0, MACH_COP_0_STATUS_REG	#   enable the FP coprocessor
	li	sp, KERNELSTACK - START_FRAME	# switch to standard stack
	mfc0	t0, MACH_COP_0_PRID		# read processor ID register
	cfc1	t1, MACH_FPC_ID			# read FPU ID register
	sw	t0, cpu				# save PRID register
	sw	t1, fpu				# save FPU ID register
	jal	main				# main()
	nop

/* proc[1] == /etc/init now running here; run icode */
	li	v0, PSL_USERSET
	mtc0	v0, MACH_COP_0_STATUS_REG	# switch to user mode
	li	v0, VM_MIN_ADDRESS
	j	v0				# jump to icode
	rfe					# restore-from-exception in delay slot

/*
 * GCC2 seems to want to call __main in main() for some reason.
 */
LEAF(__main)
	j	ra
	nop
END(__main)

/*
 * This code is copied to user data space as the first program to run.
 * Basically, it just calls execve();
 * (The addresses below are absolute user-space addresses because the
 * code is executed at VM_MIN_ADDRESS, not where it is assembled.)
 */
	.globl	icode
icode:
	li	a1, VM_MIN_ADDRESS + (9 * 4)	# address of 'icode_argv'
	addu	a0, a1, (3 * 4)			# address of 'icode_fname'
	move	a2, zero			# no environment
	li	v0, 59				# code for execve system call
	syscall
	li	v0, 1				# code for exit system call
	syscall					# execve failed: call exit()
1:	b	1b				# loop if exit returns
	nop
icode_argv:
	.word	VM_MIN_ADDRESS + (12 * 4)	# address of 'icode_fname'
	.word	VM_MIN_ADDRESS + (15 * 4)	# address of 'icodeEnd'
	.word	0
icode_fname:
	.asciiz	"/sbin/init"			# occupies 3 words
	.align	2
	.globl	icodeEnd
icodeEnd:

	.data
	.align	2
	.globl	szicode
szicode:
	.word	(9 + 3 + 3) * 4			# compute icodeEnd - icode
	.text

/*
 * This code is copied to the user's stack for returning from signal handlers
 * (see sendsig() and sigreturn()). We have to compute the address
 * of the sigcontext struct for the sigreturn call.
 */
	.globl	sigcode
sigcode:
	addu	a0, sp, 16		# address of sigcontext
	li	v0, SYS_sigreturn	# sigreturn(scp)
	syscall
	break	0			# just in case sigreturn fails
	.globl	esigcode
esigcode:

/*
 * Primitives
 */

/*
 * This table is indexed by u.u_pcb.pcb_onfault in trap().
 * The reason for using this table rather than storing an address in
 * u.u_pcb.pcb_onfault is simply to make the code faster.
 */
	.globl	onfault_table
	.data
	.align	2
onfault_table:
	.word	0			# invalid index number
#define BADERR		1
	.word	baderr
#define COPYERR		2
	.word	copyerr
#define FSWBERR		3
	.word	fswberr
#define FSWINTRBERR	4
	.word	fswintrberr
#ifdef KADB
#define KADBERR		5
	.word	kadberr
#endif
	.text

/*
 * See if access to addr with a len type instruction causes a machine check.
 * len is length of access (1=byte, 2=short, 4=long)
 *
 * badaddr(addr, len)
 *	char *addr;
 *	int len;
 *
 * Returns 0 if the access succeeded, 1 if it faulted (the fault
 * handler dispatches through onfault_table[BADERR] to 'baderr').
 */
LEAF(badaddr)
	li	v0, BADERR
	bne	a1, 1, 2f
	sw	v0, UADDR+U_PCB_ONFAULT	# arm fault handler (delay slot)
	b	5f
	lbu	v0, (a0)		# probe with a byte load
2:
	bne	a1, 2, 4f
	nop
	b	5f
	lhu	v0, (a0)		# probe with a halfword load
4:
	lw	v0, (a0)		# probe with a word load
5:
	sw	zero, UADDR+U_PCB_ONFAULT	# disarm fault handler
	j	ra
	move	v0, zero		# made it w/o errors
baderr:
	j	ra
	li	v0, 1			# trap sends us here
END(badaddr)

/*
 * netorder = htonl(hostorder)
 * hostorder = ntohl(netorder)
 */
LEAF(htonl)				# a0 = 0x11223344, return 0x44332211
ALEAF(ntohl)
	srl	v1, a0, 24		# v1 = 0x00000011
	sll	v0, a0, 24		# v0 = 0x44000000
	or	v0, v0, v1
	and	v1, a0, 0xff00
	sll	v1, v1, 8		# v1 = 0x00330000
	or	v0, v0, v1
	srl	v1, a0, 8
	and	v1, v1, 0xff00		# v1 = 0x00002200
	j	ra
	or	v0, v0, v1
END(htonl)

/*
 * netorder = htons(hostorder)
 * hostorder = ntohs(netorder)
 */
LEAF(htons)
ALEAF(ntohs)
	srl	v0, a0, 8
	and	v0, v0, 0xff
	sll	v1, a0, 8
	and	v1, v1, 0xff00
	j	ra
	or	v0, v0, v1
END(htons)

/*
 * bit = ffs(value)
 * Returns the 1-based index of the least significant set bit, 0 if none.
 */
LEAF(ffs)
	beq	a0, zero, 2f
	move	v0, zero
1:
	and	v1, a0, 1		# bit set?
	addu	v0, v0, 1
	beq	v1, zero, 1b		# no, continue
	srl	a0, a0, 1
2:
	j	ra
	nop
END(ffs)

/*
 * strlen(str)
 */
LEAF(strlen)
	addu	v1, a0, 1		# v1 = str + 1, so final a0 - v1
					#   excludes the '\0' terminator
1:
	lb	v0, 0(a0)		# get byte from string
	addu	a0, a0, 1		# increment pointer
	bne	v0, zero, 1b		# continue if not end
	nop
	j	ra
	subu	v0, a0, v1		# compute length - 1 for '\0' char
END(strlen)

/*
 * NOTE: this version assumes unsigned chars in order to be "8 bit clean".
 */
LEAF(strcmp)
1:
	lbu	t0, 0(a0)		# get two bytes and compare them
	lbu	t1, 0(a1)
	beq	t0, zero, LessOrEq	# end of first string?
	nop
	bne	t0, t1, NotEq
	nop
	lbu	t0, 1(a0)		# unroll loop
	lbu	t1, 1(a1)
	beq	t0, zero, LessOrEq	# end of first string?
	addu	a0, a0, 2
	beq	t0, t1, 1b
	addu	a1, a1, 2
NotEq:
	j	ra
	subu	v0, t0, t1
LessOrEq:
	j	ra
	subu	v0, zero, t1
END(strcmp)

/*
 * bzero(s1, n)
 */
LEAF(bzero)
ALEAF(blkclr)
	blt	a1, 12, smallclr	# small amount to clear?
	subu	a3, zero, a0		# compute # bytes to word align address
	and	a3, a3, 3
	beq	a3, zero, 1f		# skip if word aligned
	subu	a1, a1, a3		# subtract from remaining count
	swr	zero, 0(a0)		# clear 1, 2, or 3 bytes to align
	addu	a0, a0, a3
1:
	and	v0, a1, 3		# compute number of words left
	subu	a3, a1, v0
	move	a1, v0
	addu	a3, a3, a0		# compute ending address
2:
	addu	a0, a0, 4		# clear words
	bne	a0, a3, 2b		#   unrolling loop does not help
	sw	zero, -4(a0)		#   since we are limited by memory speed
smallclr:
	ble	a1, zero, 2f
	addu	a3, a1, a0		# compute ending address
1:
	addu	a0, a0, 1		# clear bytes
	bne	a0, a3, 1b
	sb	zero, -1(a0)
2:
	j	ra
	nop
END(bzero)

/*
 * bcmp(s1, s2, n)
 * Returns 0 if the regions match, non-zero otherwise.
 */
LEAF(bcmp)
	blt	a2, 16, smallcmp	# is it worth any trouble?
	xor	v0, a0, a1		# compare low two bits of addresses
	and	v0, v0, 3
	subu	a3, zero, a1		# compute # bytes to word align address
	bne	v0, zero, unalignedcmp	# not possible to align addresses
	and	a3, a3, 3

	beq	a3, zero, 1f
	subu	a2, a2, a3		# subtract from remaining count
	move	v0, v1			# init v0,v1 so unmodified bytes match
	lwr	v0, 0(a0)		# read 1, 2, or 3 bytes
	lwr	v1, 0(a1)
	addu	a1, a1, a3
	bne	v0, v1, nomatch
	addu	a0, a0, a3
1:
	and	a3, a2, ~3		# compute number of whole words left
	subu	a2, a2, a3		#   which has to be >= (16-3) & ~3
	addu	a3, a3, a0		# compute ending address
2:
	lw	v0, 0(a0)		# compare words
	lw	v1, 0(a1)
	addu	a0, a0, 4
	bne	v0, v1, nomatch
	addu	a1, a1, 4
	bne	a0, a3, 2b
	nop
	b	smallcmp		# finish remainder
	nop
unalignedcmp:
	beq	a3, zero, 2f
	subu	a2, a2, a3		# subtract from remaining count
	addu	a3, a3, a0		# compute ending address
1:
	lbu	v0, 0(a0)		# compare bytes until a1 word aligned
	lbu	v1, 0(a1)
	addu	a0, a0, 1
	bne	v0, v1, nomatch
	addu	a1, a1, 1
	bne	a0, a3, 1b
	nop
2:
	and	a3, a2, ~3		# compute number of whole words left
	subu	a2, a2, a3		#   which has to be >= (16-3) & ~3
	addu	a3, a3, a0		# compute ending address
3:
	lwr	v0, 0(a0)		# compare words a0 unaligned, a1 aligned
	lwl	v0, 3(a0)
	lw	v1, 0(a1)
	addu	a0, a0, 4
	bne	v0, v1, nomatch
	addu	a1, a1, 4
	bne	a0, a3, 3b
	nop
smallcmp:
	ble	a2, zero, match
	addu	a3, a2, a0		# compute ending address
1:
	lbu	v0, 0(a0)
	lbu	v1, 0(a1)
	addu	a0, a0, 1
	bne	v0, v1, nomatch
	addu	a1, a1, 1
	bne	a0, a3, 1b
	nop
match:
	j	ra
	move	v0, zero
nomatch:
	j	ra
	li	v0, 1
END(bcmp)

/*
 * {ov}bcopy(from, to, len)
 * Handles overlapping regions by copying backwards when necessary.
 */
LEAF(bcopy)
ALEAF(ovbcopy)
	addu	t0, a0, a2		# t0 = end of s1 region
	sltu	t1, a1, t0
	sltu	t2, a0, a1
	and	t1, t1, t2		# t1 = true if from < to < (from+len)
	beq	t1, zero, forward	# non overlapping, do forward copy
	slt	t2, a2, 12		# check for small copy

	ble	a2, zero, 2f
	addu	t1, a1, a2		# t1 = end of to region
1:
	lb	v0, -1(t0)		# copy bytes backwards,
	subu	t0, t0, 1		#   doesn't happen often so do slow way
	subu	t1, t1, 1
	bne	t0, a0, 1b
	sb	v0, 0(t1)
2:
	j	ra
	nop
forward:
	bne	t2, zero, smallcpy	# do a small bcopy
	xor	v0, a0, a1		# compare low two bits of addresses
	and	v0, v0, 3
	subu	a3, zero, a1		# compute # bytes to word align address
	beq	v0, zero, aligned	# addresses can be word aligned
	and	a3, a3, 3

	beq	a3, zero, 1f
	subu	a2, a2, a3		# subtract from remaining count
	lwr	v0, 0(a0)		# get next 4 bytes (unaligned)
	lwl	v0, 3(a0)
	addu	a0, a0, a3
	swr	v0, 0(a1)		# store 1, 2, or 3 bytes to align a1
	addu	a1, a1, a3
1:
	and	v0, a2, 3		# compute number of words left
	subu	a3, a2, v0
	move	a2, v0
	addu	a3, a3, a0		# compute ending address
2:
	lwr	v0, 0(a0)		# copy words a0 unaligned, a1 aligned
	lwl	v0, 3(a0)
	addu	a0, a0, 4
	addu	a1, a1, 4
	bne	a0, a3, 2b
	sw	v0, -4(a1)
	b	smallcpy
	nop
aligned:
	beq	a3, zero, 1f
	subu	a2, a2, a3		# subtract from remaining count
	lwr	v0, 0(a0)		# copy 1, 2, or 3 bytes to align
	addu	a0, a0, a3
	swr	v0, 0(a1)
	addu	a1, a1, a3
1:
	and	v0, a2, 3		# compute number of whole words left
	subu	a3, a2, v0
	move	a2, v0
	addu	a3, a3, a0		# compute ending address
2:
	lw	v0, 0(a0)		# copy words
	addu	a0, a0, 4
	addu	a1, a1, 4
	bne	a0, a3, 2b
	sw	v0, -4(a1)
smallcpy:
	ble	a2, zero, 2f
	addu	a3, a2, a0		# compute ending address
1:
	lbu	v0, 0(a0)		# copy bytes
	addu	a0, a0, 1
	addu	a1, a1, 1
	bne	a0, a3, 1b
	sb	v0, -1(a1)
2:
	j	ra
	move	v0, zero
END(bcopy)

/*
 * Copy a null terminated string within the kernel address space.
 * Maxlength may be null if count not wanted.
 * copystr(fromaddr, toaddr, maxlength, &lencopied)
 *	caddr_t fromaddr;
 *	caddr_t toaddr;
 *	u_int maxlength;
 *	u_int *lencopied;
 */
LEAF(copystr)
	move	t2, a2			# Save the number of bytes
1:
	lbu	t0, 0(a0)
	subu	a2, a2, 1
	beq	t0, zero, 2f
	sb	t0, 0(a1)		# store byte (incl. '\0') in delay slot
	addu	a0, a0, 1
	bne	a2, zero, 1b
	addu	a1, a1, 1
2:
	beq	a3, zero, 3f
	subu	a2, t2, a2		# compute length copied
	sw	a2, 0(a3)
3:
	j	ra
	move	v0, zero
END(copystr)

/*
 * Copy a null terminated string from the user address space into
 * the kernel address space.
 *
 * copyinstr(fromaddr, toaddr, maxlength, &lencopied)
 *	caddr_t fromaddr;
 *	caddr_t toaddr;
 *	u_int maxlength;
 *	u_int *lencopied;
 */
NON_LEAF(copyinstr, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	sw	ra, STAND_RA_OFFSET(sp)
	blt	a0, zero, copyerr	# make sure address is in user space
	li	v0, COPYERR
	jal	copystr
	sw	v0, UADDR+U_PCB_ONFAULT	# arm fault handler (delay slot)
	lw	ra, STAND_RA_OFFSET(sp)
	sw	zero, UADDR+U_PCB_ONFAULT
	addu	sp, sp, STAND_FRAME_SIZE
	j	ra
	move	v0, zero
END(copyinstr)

/*
 * Copy a null terminated string from the kernel address space into
 * the user address space.
 *
 * copyoutstr(fromaddr, toaddr, maxlength, &lencopied)
 *	caddr_t fromaddr;
 *	caddr_t toaddr;
 *	u_int maxlength;
 *	u_int *lencopied;
 */
NON_LEAF(copyoutstr, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	sw	ra, STAND_RA_OFFSET(sp)
	blt	a1, zero, copyerr	# make sure address is in user space
	li	v0, COPYERR
	jal	copystr
	sw	v0, UADDR+U_PCB_ONFAULT	# arm fault handler (delay slot)
	lw	ra, STAND_RA_OFFSET(sp)
	sw	zero, UADDR+U_PCB_ONFAULT
	addu	sp, sp, STAND_FRAME_SIZE
	j	ra
	move	v0, zero
END(copyoutstr)

/*
 * Copy specified amount of data from user space into the kernel
 *	copyin(from, to, len)
 *	caddr_t *from;	(user source address)
 *	caddr_t *to;	(kernel destination address)
 *	unsigned len;
 */
NON_LEAF(copyin, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	sw	ra, STAND_RA_OFFSET(sp)
	blt	a0, zero, copyerr	# make sure address is in user space
	li	v0, COPYERR
	jal	bcopy
	sw	v0, UADDR+U_PCB_ONFAULT	# arm fault handler (delay slot)
	lw	ra, STAND_RA_OFFSET(sp)
	sw	zero, UADDR+U_PCB_ONFAULT
	addu	sp, sp, STAND_FRAME_SIZE
	j	ra
	move	v0, zero
END(copyin)

/*
 * Copy specified amount of data from kernel to the user space
 *	copyout(from, to, len)
 *	caddr_t *from;	(kernel source address)
 *	caddr_t *to;	(user destination address)
 *	unsigned len;
 */
NON_LEAF(copyout, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	sw	ra, STAND_RA_OFFSET(sp)
	blt	a1, zero, copyerr	# make sure address is in user space
	li	v0, COPYERR
	jal	bcopy
	sw	v0, UADDR+U_PCB_ONFAULT	# arm fault handler (delay slot)
	lw	ra, STAND_RA_OFFSET(sp)
	sw	zero, UADDR+U_PCB_ONFAULT
	addu	sp, sp, STAND_FRAME_SIZE
	j	ra
	move	v0, zero
END(copyout)

/*
 * Fault recovery for the copy routines above: trap() dispatches here
 * (via onfault_table[COPYERR]) with the copy routine's frame still live.
 */
LEAF(copyerr)
	lw	ra, STAND_RA_OFFSET(sp)
	sw	zero, UADDR+U_PCB_ONFAULT
	addu	sp, sp, STAND_FRAME_SIZE
	j	ra
	li	v0, EFAULT		# return error
END(copyerr)

/*
 * Copy data to the DMA buffer.
 * The DMA buffer can only be written one short at a time
 * (and takes ~14 cycles).
 *
 *	CopyToBuffer(src, dst, length)
 *		u_short *src;	NOTE: must be short aligned
 *		u_short *dst;
 *		int length;
 */
LEAF(CopyToBuffer)
	blez	a2, 2f
	nop
1:
	lhu	t0, 0(a0)		# read 2 bytes of data
	subu	a2, a2, 2
	addu	a0, a0, 2
	addu	a1, a1, 4		# dst advances by 4: buffer shorts
					#   live in word-aligned slots
	bgtz	a2, 1b
	sh	t0, -4(a1)		# write 2 bytes of data to buffer
2:
	j	ra
	nop
END(CopyToBuffer)

/*
 * Copy data from the DMA buffer.
 * The DMA buffer can only be read one short at a time
 * (and takes ~12 cycles).
 *
 *	CopyFromBuffer(src, dst, length)
 *		u_short *src;
 *		char *dst;
 *		int length;
 */
LEAF(CopyFromBuffer)
	and	t0, a1, 1		# test for aligned dst
	beq	t0, zero, 3f
	nop
	blt	a2, 2, 7f		# at least 2 bytes to copy?
	nop
1:
	lhu	t0, 0(a0)		# read 2 bytes of data from buffer
	addu	a0, a0, 4		# keep buffer pointer word aligned
	addu	a1, a1, 2
	subu	a2, a2, 2
	sb	t0, -2(a1)		# odd dst: store the short a byte
	srl	t0, t0, 8		#   at a time
	bge	a2, 2, 1b
	sb	t0, -1(a1)
3:
	blt	a2, 2, 7f		# at least 2 bytes to copy?
	nop
6:
	lhu	t0, 0(a0)		# read 2 bytes of data from buffer
	addu	a0, a0, 4		# keep buffer pointer word aligned
	addu	a1, a1, 2
	subu	a2, a2, 2
	bge	a2, 2, 6b
	sh	t0, -2(a1)
7:
	ble	a2, zero, 9f		# done?
	nop
	lhu	t0, 0(a0)		# copy one more byte
	nop
	sb	t0, 0(a1)
9:
	j	ra
	nop
END(CopyFromBuffer)

/*
 * Copy the kernel stack to the new process and save the current context so
 * the new process will return nonzero when it is resumed by cpu_switch().
 *
 * copykstack(up)
 *	struct user *up;
 */
LEAF(copykstack)
	subu	v0, sp, UADDR		# compute offset into stack
	addu	v0, v0, a0		# v0 = new stack address
	move	v1, sp			# v1 = old stack address
	li	t1, KERNELSTACK
1:
	lw	t0, 0(v1)		# copy stack data
	addu	v1, v1, 4
	sw	t0, 0(v0)
	bne	v1, t1, 1b
	addu	v0, v0, 4
	/* FALLTHROUGH */
/*
 * Save registers and state so we can do a longjmp later.
 * Note: this only works if p != curproc since
 * cpu_switch() will copy over pcb_context.
 *
 * savectx(up)
 *	struct user *up;
 *
 * Returns 0 here; a later resume via cpu_switch() returns 1
 * (see the matching 'li v0, 1' at the end of cpu_switch).
 */
ALEAF(savectx)
	sw	s0, U_PCB_CONTEXT+0(a0)
	sw	s1, U_PCB_CONTEXT+4(a0)
	sw	s2, U_PCB_CONTEXT+8(a0)
	sw	s3, U_PCB_CONTEXT+12(a0)
	mfc0	v0, MACH_COP_0_STATUS_REG
	sw	s4, U_PCB_CONTEXT+16(a0)
	sw	s5, U_PCB_CONTEXT+20(a0)
	sw	s6, U_PCB_CONTEXT+24(a0)
	sw	s7, U_PCB_CONTEXT+28(a0)
	sw	sp, U_PCB_CONTEXT+32(a0)
	sw	s8, U_PCB_CONTEXT+36(a0)
	sw	ra, U_PCB_CONTEXT+40(a0)
	sw	v0, U_PCB_CONTEXT+44(a0)	# saved status register
	j	ra
	move	v0, zero
END(copykstack)

/*
 * The following primitives manipulate the run queues.  _whichqs tells which
 * of the 32 queues _qs have processes in them.  Setrunqueue puts processes
 * into queues, Remrq removes them from queues.  The running process is on
 * no queue, other processes are on a queue related to p->p_priority, divided
 * by 4 actually to shrink the 0-127 range of priorities into the 32 available
 * queues.
 */
/*
 * setrunqueue(p)
 *	proc *p;
 *
 * Call should be made at splclock(), and p->p_stat should be SRUN.
 */
NON_LEAF(setrunqueue, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	lw	t0, P_BACK(a0)		## firewall: p->p_back must be 0
	sw	ra, STAND_RA_OFFSET(sp)	##
	beq	t0, zero, 1f		##
	lbu	t0, P_PRIORITY(a0)	# put on p->p_priority / 4 queue
	PANIC("setrunqueue")		##
1:
	li	t1, 1			# compute corresponding bit
	srl	t0, t0, 2		# compute index into 'whichqs'
	sll	t1, t1, t0
	lw	t2, whichqs		# set corresponding bit
	nop				# load delay slot
	or	t2, t2, t1
	sw	t2, whichqs
	sll	t0, t0, 3		# compute index into 'qs'
	la	t1, qs
	addu	t0, t0, t1		# t0 = qp = &qs[pri >> 2]
	lw	t1, P_BACK(t0)		# t1 = qp->ph_rlink
	sw	t0, P_FORW(a0)		# p->p_forw = qp
	sw	t1, P_BACK(a0)		# p->p_back = qp->ph_rlink
	sw	a0, P_FORW(t1)		# p->p_back->p_forw = p;
	sw	a0, P_BACK(t0)		# qp->ph_rlink = p
	j	ra
	addu	sp, sp, STAND_FRAME_SIZE
END(setrunqueue)

/*
 * Remrq(p)
 *
 * Call should be made at splclock().
 */
NON_LEAF(remrq, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	lbu	t0, P_PRIORITY(a0)	# get from p->p_priority / 4 queue
	li	t1, 1			# compute corresponding bit
	srl	t0, t0, 2		# compute index into 'whichqs'
	lw	t2, whichqs		# check corresponding bit
	sll	t1, t1, t0
	and	v0, t2, t1
	sw	ra, STAND_RA_OFFSET(sp)	##
	bne	v0, zero, 1f		##
	lw	v0, P_BACK(a0)		# v0 = p->p_back
	PANIC("remrq")			## it wasn't recorded to be on its q
1:
	lw	v1, P_FORW(a0)		# v1 = p->p_forw
	nop				# load delay slot
	sw	v1, P_FORW(v0)		# p->p_back->p_forw = p->p_forw;
	sw	v0, P_BACK(v1)		# p->p_forw->p_back = p->r_rlink
	sll	t0, t0, 3		# compute index into 'qs'
	la	v0, qs
	addu	t0, t0, v0		# t0 = qp = &qs[pri >> 2]
	lw	v0, P_FORW(t0)		# check if queue empty
	nop				# load delay slot
	bne	v0, t0, 2f		# No. qp->ph_link != qp
	nop
	xor	t2, t2, t1		# clear corresponding bit in 'whichqs'
	sw	t2, whichqs
2:
	sw	zero, P_BACK(a0)	## for firewall checking
	j	ra
	addu	sp, sp, STAND_FRAME_SIZE
END(remrq)

/*
 * switch_exit()
 *
 * At exit of a process, do a cpu_switch for the last time.
 * The mapping of the pcb at p->p_addr has already been deleted,
 * and the memory for the pcb+stack has been freed.
 * All interrupts should be blocked at this point.
 */
LEAF(switch_exit)
	la	v1, nullproc			# save state into garbage proc
	lw	t0, P_UPTE+0(v1)		# t0 = first u. pte
	lw	t1, P_UPTE+4(v1)		# t1 = 2nd u. pte
	li	v0, UADDR			# v0 = first HI entry
	mtc0	zero, MACH_COP_0_TLB_INDEX	# set the index register
	mtc0	v0, MACH_COP_0_TLB_HI		# init high entry
	mtc0	t0, MACH_COP_0_TLB_LOW		# init low entry
	li	t0, 1 << VMMACH_TLB_INDEX_SHIFT
	tlbwi					# Write the TLB entry.
	addu	v0, v0, NBPG			# 2nd HI entry
	mtc0	t0, MACH_COP_0_TLB_INDEX	# set the index register
	mtc0	v0, MACH_COP_0_TLB_HI		# init high entry
	mtc0	t1, MACH_COP_0_TLB_LOW		# init low entry
	sw	zero, curproc
	tlbwi					# Write the TLB entry.
	b	cpu_switch
	li	sp, KERNELSTACK - START_FRAME	# switch to standard stack
END(switch_exit)

/*
 * When no processes are on the runq, cpu_switch branches to idle
 * to wait for something to come ready.
 * Note: this is really a part of cpu_switch() but defined here for kernel
 * profiling.
 */
LEAF(idle)
	li	t0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR)
	mtc0	t0, MACH_COP_0_STATUS_REG	# enable all interrupts
	sw	zero, curproc			# set curproc NULL for stats
1:
	lw	t0, whichqs			# look for non-empty queue
	nop					# load delay slot
	beq	t0, zero, 1b
	nop
	b	sw1
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable all interrupts
END(idle)

/*
 * cpu_switch()
 *	Find the highest priority process and resume it.
 */
NON_LEAF(cpu_switch, STAND_FRAME_SIZE, ra)
	sw	sp, UADDR+U_PCB_CONTEXT+32	# save old sp
	subu	sp, sp, STAND_FRAME_SIZE
	sw	ra, STAND_RA_OFFSET(sp)
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	lw	t2, cnt+V_SWTCH			# for statistics
	lw	t1, whichqs			# look for non-empty queue
	sw	s0, UADDR+U_PCB_CONTEXT+0	# do a 'savectx()'
	sw	s1, UADDR+U_PCB_CONTEXT+4
	sw	s2, UADDR+U_PCB_CONTEXT+8
	sw	s3, UADDR+U_PCB_CONTEXT+12
	mfc0	t0, MACH_COP_0_STATUS_REG	# t0 = saved status register
	sw	s4, UADDR+U_PCB_CONTEXT+16
	sw	s5, UADDR+U_PCB_CONTEXT+20
	sw	s6, UADDR+U_PCB_CONTEXT+24
	sw	s7, UADDR+U_PCB_CONTEXT+28
	sw	s8, UADDR+U_PCB_CONTEXT+36
	sw	ra, UADDR+U_PCB_CONTEXT+40	# save return address
	sw	t0, UADDR+U_PCB_CONTEXT+44	# save status register
	addu	t2, t2, 1
	sw	t2, cnt+V_SWTCH
	beq	t1, zero, idle			# if none, idle
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable all interrupts
sw1:
	nop					# wait for intrs disabled
	nop
	lw	t0, whichqs			# look for non-empty queue
	li	t2, -1				# t2 = lowest bit set
	beq	t0, zero, idle			# if none, idle
	move	t3, t0				# t3 = saved whichqs
1:
	addu	t2, t2, 1
	and	t1, t0, 1			# bit set?
	beq	t1, zero, 1b
	srl	t0, t0, 1			# try next bit
/*
 * Remove process from queue.
 */
	sll	t0, t2, 3			# 8 bytes per queue head
	la	t1, qs
	addu	t0, t0, t1			# t0 = qp = &qs[highbit]
	lw	a0, P_FORW(t0)			# a0 = p = highest pri process
	nop					# load delay slot
	lw	v0, P_FORW(a0)			# v0 = p->p_forw
	bne	t0, a0, 2f			# make sure something in queue
	sw	v0, P_FORW(t0)			# qp->ph_link = p->p_forw;
	PANIC("cpu_switch")			# nothing in queue
2:
	sw	t0, P_BACK(v0)			# p->p_forw->p_back = qp
	bne	v0, t0, 3f			# queue still not empty
	sw	zero, P_BACK(a0)		## for firewall checking
	li	v1, 1				# compute bit in 'whichqs'
	sll	v1, v1, t2
	xor	t3, t3, v1			# clear bit in 'whichqs'
	sw	t3, whichqs
3:
/*
 * Switch to new context.
 */
	sw	zero, want_resched
	jal	pmap_alloc_tlbpid		# v0 = TLB PID
	move	s0, a0				# save p (delay slot)
	move	a0, s0				# restore p
	sw	a0, curproc			# set curproc
	sll	v0, v0, VMMACH_TLB_PID_SHIFT	# v0 = aligned PID
	lw	t0, P_UPTE+0(a0)		# t0 = first u. pte
	lw	t1, P_UPTE+4(a0)		# t1 = 2nd u. pte
	or	v0, v0, UADDR			# v0 = first HI entry
/*
 * Resume process indicated by the pte's for its u struct
 * NOTE: This is hard coded to UPAGES == 2.
 * Also, there should be no TLB faults at this point.
 */
	mtc0	zero, MACH_COP_0_TLB_INDEX	# set the index register
	mtc0	v0, MACH_COP_0_TLB_HI		# init high entry
	mtc0	t0, MACH_COP_0_TLB_LOW		# init low entry
	li	t0, 1 << VMMACH_TLB_INDEX_SHIFT
	tlbwi					# Write the TLB entry.
	addu	v0, v0, NBPG			# 2nd HI entry
	mtc0	t0, MACH_COP_0_TLB_INDEX	# set the index register
	mtc0	v0, MACH_COP_0_TLB_HI		# init high entry
	mtc0	t1, MACH_COP_0_TLB_LOW		# init low entry
	nop
	tlbwi					# Write the TLB entry.
/*
 * Now running on new u struct.
 * Restore registers and return.
 */
	lw	v0, UADDR+U_PCB_CONTEXT+44	# restore kernel context
	lw	ra, UADDR+U_PCB_CONTEXT+40
	lw	s0, UADDR+U_PCB_CONTEXT+0
	lw	s1, UADDR+U_PCB_CONTEXT+4
	lw	s2, UADDR+U_PCB_CONTEXT+8
	lw	s3, UADDR+U_PCB_CONTEXT+12
	lw	s4, UADDR+U_PCB_CONTEXT+16
	lw	s5, UADDR+U_PCB_CONTEXT+20
	lw	s6, UADDR+U_PCB_CONTEXT+24
	lw	s7, UADDR+U_PCB_CONTEXT+28
	lw	sp, UADDR+U_PCB_CONTEXT+32
	lw	s8, UADDR+U_PCB_CONTEXT+36
	mtc0	v0, MACH_COP_0_STATUS_REG
	j	ra
	li	v0, 1				# possible return to 'savectx()'
END(cpu_switch)

/*
 * {fu,su},{ibyte,isword,iword}, fetch or store a byte, short or word to
 * user text space.
 * {fu,su},{byte,sword,word}, fetch or store a byte, short or word to
 * user data space.
 */
LEAF(fuword)
ALEAF(fuiword)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR		# arm fault index (delay slot)
	sw	v0, UADDR+U_PCB_ONFAULT
	lw	v0, 0(a0)		# fetch word
	j	ra
	sw	zero, UADDR+U_PCB_ONFAULT
END(fuword)

LEAF(fusword)
ALEAF(fuisword)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR
	sw	v0, UADDR+U_PCB_ONFAULT
	lhu	v0, 0(a0)		# fetch short
	j	ra
	sw	zero, UADDR+U_PCB_ONFAULT
END(fusword)

LEAF(fubyte)
ALEAF(fuibyte)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR
	sw	v0, UADDR+U_PCB_ONFAULT
	lbu	v0, 0(a0)		# fetch byte
	j	ra
	sw	zero, UADDR+U_PCB_ONFAULT
END(fubyte)

LEAF(suword)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR
	sw	v0, UADDR+U_PCB_ONFAULT
	sw	a1, 0(a0)		# store word
	sw	zero, UADDR+U_PCB_ONFAULT
	j	ra
	move	v0, zero
END(suword)

/*
 * Have to flush instruction cache afterwards.
 */
LEAF(suiword)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR
	sw	v0, UADDR+U_PCB_ONFAULT
	sw	a1, 0(a0)		# store word
	sw	zero, UADDR+U_PCB_ONFAULT
	move	v0, zero
	b	MachFlushICache		# NOTE: this should not clobber v0!
	li	a1, 4			# size of word
END(suiword)

/*
 * Will have to flush the instruction cache if byte merging is done in hardware.
 */
LEAF(susword)
ALEAF(suisword)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR
	sw	v0, UADDR+U_PCB_ONFAULT
	sh	a1, 0(a0)		# store short
	sw	zero, UADDR+U_PCB_ONFAULT
	j	ra
	move	v0, zero
END(susword)

LEAF(subyte)
ALEAF(suibyte)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR
	sw	v0, UADDR+U_PCB_ONFAULT
	sb	a1, 0(a0)		# store byte
	sw	zero, UADDR+U_PCB_ONFAULT
	j	ra
	move	v0, zero
END(subyte)

/*
 * Common fault target for the fu*/su* routines above (via onfault_table).
 */
LEAF(fswberr)
	j	ra
	li	v0, -1
END(fswberr)

/*
 * fuswintr and suswintr are just like fusword and susword except that if
 * the page is not in memory or would cause a trap, then we return an error.
 * The important thing is to prevent sleep() and switch().
 */
LEAF(fuswintr)
	blt	a0, zero, fswintrberr	# make sure address is in user space
	li	v0, FSWINTRBERR
	sw	v0, UADDR+U_PCB_ONFAULT
	lhu	v0, 0(a0)		# fetch short
	j	ra
	sw	zero, UADDR+U_PCB_ONFAULT
END(fuswintr)

LEAF(suswintr)
	blt	a0, zero, fswintrberr	# make sure address is in user space
	li	v0, FSWINTRBERR
	sw	v0, UADDR+U_PCB_ONFAULT
	sh	a1, 0(a0)		# store short
	sw	zero, UADDR+U_PCB_ONFAULT
	j	ra
	move	v0, zero
END(suswintr)

LEAF(fswintrberr)
	j	ra
	li	v0, -1
END(fswintrberr)

/*
 * Insert 'p' after 'q'.
 *	_insque(p, q)
 *		caddr_t p, q;
 */
LEAF(_insque)
	lw	v0, 0(a1)		# v0 = q->next
	sw	a1, 4(a0)		# p->prev = q
	sw	v0, 0(a0)		# p->next = q->next
	sw	a0, 4(v0)		# q->next->prev = p
	j	ra
	sw	a0, 0(a1)		# q->next = p
END(_insque)

/*
 * Remove item 'p' from queue.
 *	_remque(p)
 *		caddr_t p;
 */
LEAF(_remque)
	lw	v0, 0(a0)		# v0 = p->next
	lw	v1, 4(a0)		# v1 = p->prev
	nop				# load delay slot
	sw	v0, 0(v1)		# p->prev->next = p->next
	j	ra
	sw	v1, 4(v0)		# p->next->prev = p->prev
END(_remque)

/*
 * This code is copied to the UTLB exception vector address to
 * handle user level TLB translation misses.
 * NOTE: This code must be relocatable!!!
 */
	.globl	MachUTLBMiss
MachUTLBMiss:
	.set	noat
	mfc0	k0, MACH_COP_0_BAD_VADDR	# get the virtual address
	lw	k1, UADDR+U_PCB_SEGTAB		# get the current segment table
	bltz	k0, 1f				# R3000 chip bug
	srl	k0, k0, SEGSHIFT		# compute segment table index
	sll	k0, k0, 2
	addu	k1, k1, k0
	mfc0	k0, MACH_COP_0_BAD_VADDR	# get the virtual address
	lw	k1, 0(k1)			# get pointer to segment map
	srl	k0, k0, PGSHIFT - 2		# compute segment map index
	andi	k0, k0, (NPTEPG - 1) << 2
	beq	k1, zero, 2f			# invalid segment map
	addu	k1, k1, k0			# index into segment map
	lw	k0, 0(k1)			# get page PTE
	nop					# load delay slot
	beq	k0, zero, 2f			# don't load invalid entries
	mtc0	k0, MACH_COP_0_TLB_LOW
	mfc0	k1, MACH_COP_0_EXC_PC		# get return address
	tlbwr					# update TLB (random slot)
	j	k1
	rfe
1:
	mfc0	k1, MACH_COP_0_EXC_PC		# get return address
	nop
	j	k1
	rfe
2:
	j	SlowFault			# handle the rest
	nop
	.set	at
	.globl	MachUTLBMissEnd
MachUTLBMissEnd:

/*
 * This code is copied to the general exception vector address to
 * handle all exceptions except RESET and UTLBMiss.
 * NOTE: This code must be relocatable!!!
 */
	.globl	MachException
MachException:
/*
 * Find out what mode we came from and jump to the proper handler.
 */
	.set	noat
	mfc0	k0, MACH_COP_0_STATUS_REG	# Get the status register
	mfc0	k1, MACH_COP_0_CAUSE_REG	# Get the cause register value.
	and	k0, k0, MACH_SR_KU_PREV		# test for user mode
	sll	k0, k0, 3			# shift user bit for cause index
	and	k1, k1, MACH_CR_EXC_CODE	# Mask out the cause bits.
	or	k1, k1, k0			# change index to user table
1:
	la	k0, machExceptionTable		# get base of the jump table
	addu	k0, k0, k1			# Get the address of the
						#  function entry.  Note that
						#  the cause is already
						#  shifted left by 2 bits so
						#  we don't have to shift.
	lw	k0, 0(k0)			# Get the function address
	nop					# load delay slot
	j	k0				# Jump to the function.
	nop
	.set	at
	.globl	MachExceptionEnd
MachExceptionEnd:

/*
 * We couldn't find a TLB entry.
 * Find out what mode we came from and call the appropriate handler.
 */
SlowFault:
	.set	noat
	mfc0	k0, MACH_COP_0_STATUS_REG
	nop
	and	k0, k0, MACH_SR_KU_PREV
	bne	k0, zero, MachUserGenException
	nop
	.set	at
/*
 * Fall through ...
 */

/*----------------------------------------------------------------------------
 *
 * MachKernGenException --
 *
 *	Handle an exception from kernel mode.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */

/*
 * The kernel exception stack contains 18 saved general registers,
 * the status register and the multiply lo and high registers.
 * In addition, we set this up for linkage conventions.
 */
#define KERN_REG_SIZE		(18 * 4)
#define KERN_REG_OFFSET		(STAND_FRAME_SIZE)
#define KERN_SR_OFFSET		(STAND_FRAME_SIZE + KERN_REG_SIZE)
#define KERN_MULT_LO_OFFSET	(STAND_FRAME_SIZE + KERN_REG_SIZE + 4)
#define KERN_MULT_HI_OFFSET	(STAND_FRAME_SIZE + KERN_REG_SIZE + 8)
#define KERN_EXC_FRAME_SIZE	(STAND_FRAME_SIZE + KERN_REG_SIZE + 12)

NNON_LEAF(MachKernGenException, KERN_EXC_FRAME_SIZE, ra)
	.set	noat
#ifdef KADB
	la	k0, kdbpcb			# save registers for kadb
	sw	s0, (S0 * 4)(k0)
	sw	s1, (S1 * 4)(k0)
	sw	s2, (S2 * 4)(k0)
	sw	s3, (S3 * 4)(k0)
	sw	s4, (S4 * 4)(k0)
	sw	s5, (S5 * 4)(k0)
	sw	s6, (S6 * 4)(k0)
	sw	s7, (S7 * 4)(k0)
	sw	s8, (S8 * 4)(k0)
	sw	gp, (GP * 4)(k0)
	sw	sp, (SP * 4)(k0)
#endif
	subu	sp, sp, KERN_EXC_FRAME_SIZE
	.mask	0x80000000, (STAND_RA_OFFSET - KERN_EXC_FRAME_SIZE)
/*
 * Save the relevant kernel registers onto the stack.
 * We don't need to save s0 - s8, sp and gp because
 * the compiler does it for us.
 * The mfc0/mflo reads are interleaved with the stores so their
 * results are not consumed in the cycle after the read.
 */
	sw	AT, KERN_REG_OFFSET + 0(sp)
	sw	v0, KERN_REG_OFFSET + 4(sp)
	sw	v1, KERN_REG_OFFSET + 8(sp)
	sw	a0, KERN_REG_OFFSET + 12(sp)
	mflo	v0
	mfhi	v1
	sw	a1, KERN_REG_OFFSET + 16(sp)
	sw	a2, KERN_REG_OFFSET + 20(sp)
	sw	a3, KERN_REG_OFFSET + 24(sp)
	sw	t0, KERN_REG_OFFSET + 28(sp)
	mfc0	a0, MACH_COP_0_STATUS_REG	# First arg is the status reg.
	sw	t1, KERN_REG_OFFSET + 32(sp)
	sw	t2, KERN_REG_OFFSET + 36(sp)
	sw	t3, KERN_REG_OFFSET + 40(sp)
	sw	t4, KERN_REG_OFFSET + 44(sp)
	mfc0	a1, MACH_COP_0_CAUSE_REG	# Second arg is the cause reg.
	sw	t5, KERN_REG_OFFSET + 48(sp)
	sw	t6, KERN_REG_OFFSET + 52(sp)
	sw	t7, KERN_REG_OFFSET + 56(sp)
	sw	t8, KERN_REG_OFFSET + 60(sp)
	mfc0	a2, MACH_COP_0_BAD_VADDR	# Third arg is the fault addr.
	sw	t9, KERN_REG_OFFSET + 64(sp)
	sw	ra, KERN_REG_OFFSET + 68(sp)
	sw	v0, KERN_MULT_LO_OFFSET(sp)
	sw	v1, KERN_MULT_HI_OFFSET(sp)
	mfc0	a3, MACH_COP_0_EXC_PC		# Fourth arg is the pc.
	sw	a0, KERN_SR_OFFSET(sp)
/*
 * Call the exception handler: trap(statusReg, causeReg, vadr, pc).
 */
	jal	trap
	sw	a3, STAND_RA_OFFSET(sp)		# (delay slot) for debugging
/*
 * Restore registers and return from the exception.
 * v0 contains the return address.
 */
	lw	a0, KERN_SR_OFFSET(sp)
	lw	t0, KERN_MULT_LO_OFFSET(sp)
	lw	t1, KERN_MULT_HI_OFFSET(sp)
	mtc0	a0, MACH_COP_0_STATUS_REG	# Restore the SR, disable intrs
	mtlo	t0
	mthi	t1
	move	k0, v0				# k0 survives the register reload
	lw	AT, KERN_REG_OFFSET + 0(sp)
	lw	v0, KERN_REG_OFFSET + 4(sp)
	lw	v1, KERN_REG_OFFSET + 8(sp)
	lw	a0, KERN_REG_OFFSET + 12(sp)
	lw	a1, KERN_REG_OFFSET + 16(sp)
	lw	a2, KERN_REG_OFFSET + 20(sp)
	lw	a3, KERN_REG_OFFSET + 24(sp)
	lw	t0, KERN_REG_OFFSET + 28(sp)
	lw	t1, KERN_REG_OFFSET + 32(sp)
	lw	t2, KERN_REG_OFFSET + 36(sp)
	lw	t3, KERN_REG_OFFSET + 40(sp)
	lw	t4, KERN_REG_OFFSET + 44(sp)
	lw	t5, KERN_REG_OFFSET + 48(sp)
	lw	t6, KERN_REG_OFFSET + 52(sp)
	lw	t7, KERN_REG_OFFSET + 56(sp)
	lw	t8, KERN_REG_OFFSET + 60(sp)
	lw	t9, KERN_REG_OFFSET + 64(sp)
	lw	ra, KERN_REG_OFFSET + 68(sp)
	addu	sp, sp, KERN_EXC_FRAME_SIZE
	j	k0				# Now return from the
	rfe					#  exception.
	.set	at
END(MachKernGenException)

/*----------------------------------------------------------------------------
 *
 * MachUserGenException --
 *
 *	Handle an exception from user mode.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
NNON_LEAF(MachUserGenException, STAND_FRAME_SIZE, ra)
	.set	noat
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
/*
 * Save all of the registers except for the kernel temporaries in u.u_pcb.
 * The mfc0/mflo/mfhi reads are interleaved with the stores so their
 * results are not consumed in the cycle after the read.
 */
	sw	AT, UADDR+U_PCB_REGS+(AST * 4)
	sw	v0, UADDR+U_PCB_REGS+(V0 * 4)
	sw	v1, UADDR+U_PCB_REGS+(V1 * 4)
	sw	a0, UADDR+U_PCB_REGS+(A0 * 4)
	mflo	v0
	sw	a1, UADDR+U_PCB_REGS+(A1 * 4)
	sw	a2, UADDR+U_PCB_REGS+(A2 * 4)
	sw	a3, UADDR+U_PCB_REGS+(A3 * 4)
	sw	t0, UADDR+U_PCB_REGS+(T0 * 4)
	mfhi	v1
	sw	t1, UADDR+U_PCB_REGS+(T1 * 4)
	sw	t2, UADDR+U_PCB_REGS+(T2 * 4)
	sw	t3, UADDR+U_PCB_REGS+(T3 * 4)
	sw	t4, UADDR+U_PCB_REGS+(T4 * 4)
	mfc0	a0, MACH_COP_0_STATUS_REG	# First arg is the status reg.
	sw	t5, UADDR+U_PCB_REGS+(T5 * 4)
	sw	t6, UADDR+U_PCB_REGS+(T6 * 4)
	sw	t7, UADDR+U_PCB_REGS+(T7 * 4)
	sw	s0, UADDR+U_PCB_REGS+(S0 * 4)
	mfc0	a1, MACH_COP_0_CAUSE_REG	# Second arg is the cause reg.
	sw	s1, UADDR+U_PCB_REGS+(S1 * 4)
	sw	s2, UADDR+U_PCB_REGS+(S2 * 4)
	sw	s3, UADDR+U_PCB_REGS+(S3 * 4)
	sw	s4, UADDR+U_PCB_REGS+(S4 * 4)
	mfc0	a2, MACH_COP_0_BAD_VADDR	# Third arg is the fault addr
	sw	s5, UADDR+U_PCB_REGS+(S5 * 4)
	sw	s6, UADDR+U_PCB_REGS+(S6 * 4)
	sw	s7, UADDR+U_PCB_REGS+(S7 * 4)
	sw	t8, UADDR+U_PCB_REGS+(T8 * 4)
	mfc0	a3, MACH_COP_0_EXC_PC		# Fourth arg is the pc.
	sw	t9, UADDR+U_PCB_REGS+(T9 * 4)
	sw	gp, UADDR+U_PCB_REGS+(GP * 4)
	sw	sp, UADDR+U_PCB_REGS+(SP * 4)
	sw	s8, UADDR+U_PCB_REGS+(S8 * 4)
	li	sp, KERNELSTACK - STAND_FRAME_SIZE	# switch to kernel SP
	sw	ra, UADDR+U_PCB_REGS+(RA * 4)
	sw	v0, UADDR+U_PCB_REGS+(MULLO * 4)
	sw	v1, UADDR+U_PCB_REGS+(MULHI * 4)
	sw	a0, UADDR+U_PCB_REGS+(SR * 4)
	# la	gp, _gp				# switch to kernel GP
	sw	a3, UADDR+U_PCB_REGS+(PC * 4)
	sw	a3, STAND_RA_OFFSET(sp)		# for debugging
	.set	at
	and	t0, a0, ~MACH_SR_COP_1_BIT	# Turn off the FPU.
	.set	noat
/*
 * Call the exception handler: trap(statusReg, causeReg, vadr, pc).
 */
	jal	trap
	mtc0	t0, MACH_COP_0_STATUS_REG	# (delay slot) write back SR w/o FPU
/*
 * Restore user registers and return.  NOTE: interrupts are enabled.
 */
	lw	a0, UADDR+U_PCB_REGS+(SR * 4)
	lw	t0, UADDR+U_PCB_REGS+(MULLO * 4)
	lw	t1, UADDR+U_PCB_REGS+(MULHI * 4)
	mtc0	a0, MACH_COP_0_STATUS_REG	# this should disable interrupts
	mtlo	t0
	mthi	t1
	lw	k0, UADDR+U_PCB_REGS+(PC * 4)	# k0 = user PC to resume at
	lw	AT, UADDR+U_PCB_REGS+(AST * 4)
	lw	v0, UADDR+U_PCB_REGS+(V0 * 4)
	lw	v1, UADDR+U_PCB_REGS+(V1 * 4)
	lw	a0, UADDR+U_PCB_REGS+(A0 * 4)
	lw	a1, UADDR+U_PCB_REGS+(A1 * 4)
	lw	a2, UADDR+U_PCB_REGS+(A2 * 4)
	lw	a3, UADDR+U_PCB_REGS+(A3 * 4)
	lw	t0, UADDR+U_PCB_REGS+(T0 * 4)
	lw	t1, UADDR+U_PCB_REGS+(T1 * 4)
	lw	t2, UADDR+U_PCB_REGS+(T2 * 4)
	lw	t3, UADDR+U_PCB_REGS+(T3 * 4)
	lw	t4, UADDR+U_PCB_REGS+(T4 * 4)
	lw	t5, UADDR+U_PCB_REGS+(T5 * 4)
	lw	t6, UADDR+U_PCB_REGS+(T6 * 4)
	lw	t7, UADDR+U_PCB_REGS+(T7 * 4)
	lw	s0, UADDR+U_PCB_REGS+(S0 * 4)
	lw	s1, UADDR+U_PCB_REGS+(S1 * 4)
	lw	s2, UADDR+U_PCB_REGS+(S2 * 4)
	lw	s3, UADDR+U_PCB_REGS+(S3 * 4)
	lw	s4, UADDR+U_PCB_REGS+(S4 * 4)
	lw	s5, UADDR+U_PCB_REGS+(S5 * 4)
	lw	s6, UADDR+U_PCB_REGS+(S6 * 4)
	lw	s7, UADDR+U_PCB_REGS+(S7 * 4)
	lw	t8, UADDR+U_PCB_REGS+(T8 * 4)
	lw	t9, UADDR+U_PCB_REGS+(T9 * 4)
	lw	gp, UADDR+U_PCB_REGS+(GP * 4)
	lw	sp, UADDR+U_PCB_REGS+(SP * 4)
	lw	s8, UADDR+U_PCB_REGS+(S8 * 4)
	lw	ra, UADDR+U_PCB_REGS+(RA * 4)
	j	k0
	rfe					# (delay slot) back to user mode
	.set	at
END(MachUserGenException)

/*----------------------------------------------------------------------------
 *
 * MachKernIntr --
 *
 *	Handle an interrupt from kernel mode.
 *	Interrupts use the standard kernel stack.
 *	switch_exit sets up a kernel stack after exit so interrupts won't fail.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
#define KINTR_REG_OFFSET	(STAND_FRAME_SIZE)
#define KINTR_SR_OFFSET		(STAND_FRAME_SIZE + KERN_REG_SIZE)
#define KINTR_MULT_LO_OFFSET	(STAND_FRAME_SIZE + KERN_REG_SIZE + 4)
#define KINTR_MULT_HI_OFFSET	(STAND_FRAME_SIZE + KERN_REG_SIZE + 8)
#define KINTR_FRAME_SIZE	(STAND_FRAME_SIZE + KERN_REG_SIZE + 12)

NNON_LEAF(MachKernIntr, KINTR_FRAME_SIZE, ra)
	.set	noat
	subu	sp, sp, KINTR_FRAME_SIZE	# allocate stack frame
	.mask	0x80000000, (STAND_RA_OFFSET - KINTR_FRAME_SIZE)
/*
 * Save the relevant kernel registers onto the stack.
 * We don't need to save s0 - s8, sp and gp because
 * the compiler does it for us.
 */
	sw	AT, KINTR_REG_OFFSET + 0(sp)
	sw	v0, KINTR_REG_OFFSET + 4(sp)
	sw	v1, KINTR_REG_OFFSET + 8(sp)
	sw	a0, KINTR_REG_OFFSET + 12(sp)
	mflo	v0
	mfhi	v1
	sw	a1, KINTR_REG_OFFSET + 16(sp)
	sw	a2, KINTR_REG_OFFSET + 20(sp)
	sw	a3, KINTR_REG_OFFSET + 24(sp)
	sw	t0, KINTR_REG_OFFSET + 28(sp)
	mfc0	a0, MACH_COP_0_STATUS_REG	# First arg is the status reg.
	sw	t1, KINTR_REG_OFFSET + 32(sp)
	sw	t2, KINTR_REG_OFFSET + 36(sp)
	sw	t3, KINTR_REG_OFFSET + 40(sp)
	sw	t4, KINTR_REG_OFFSET + 44(sp)
	mfc0	a1, MACH_COP_0_CAUSE_REG	# Second arg is the cause reg.
	sw	t5, KINTR_REG_OFFSET + 48(sp)
	sw	t6, KINTR_REG_OFFSET + 52(sp)
	sw	t7, KINTR_REG_OFFSET + 56(sp)
	sw	t8, KINTR_REG_OFFSET + 60(sp)
	mfc0	a2, MACH_COP_0_EXC_PC		# Third arg is the pc.
	sw	t9, KINTR_REG_OFFSET + 64(sp)
	sw	ra, KINTR_REG_OFFSET + 68(sp)
	sw	v0, KINTR_MULT_LO_OFFSET(sp)
	sw	v1, KINTR_MULT_HI_OFFSET(sp)
	sw	a0, KINTR_SR_OFFSET(sp)
/*
 * Call the interrupt handler: interrupt(statusReg, causeReg, pc).
 */
	jal	interrupt
	sw	a2, STAND_RA_OFFSET(sp)		# (delay slot) for debugging
/*
 * Restore registers and return from the interrupt.
 */
	lw	a0, KINTR_SR_OFFSET(sp)
	lw	t0, KINTR_MULT_LO_OFFSET(sp)
	lw	t1, KINTR_MULT_HI_OFFSET(sp)
	mtc0	a0, MACH_COP_0_STATUS_REG	# Restore the SR, disable intrs
	mtlo	t0
	mthi	t1
	lw	k0, STAND_RA_OFFSET(sp)		# k0 = interrupted PC
	lw	AT, KINTR_REG_OFFSET + 0(sp)
	lw	v0, KINTR_REG_OFFSET + 4(sp)
	lw	v1, KINTR_REG_OFFSET + 8(sp)
	lw	a0, KINTR_REG_OFFSET + 12(sp)
	lw	a1, KINTR_REG_OFFSET + 16(sp)
	lw	a2, KINTR_REG_OFFSET + 20(sp)
	lw	a3, KINTR_REG_OFFSET + 24(sp)
	lw	t0, KINTR_REG_OFFSET + 28(sp)
	lw	t1, KINTR_REG_OFFSET + 32(sp)
	lw	t2, KINTR_REG_OFFSET + 36(sp)
	lw	t3, KINTR_REG_OFFSET + 40(sp)
	lw	t4, KINTR_REG_OFFSET + 44(sp)
	lw	t5, KINTR_REG_OFFSET + 48(sp)
	lw	t6, KINTR_REG_OFFSET + 52(sp)
	lw	t7, KINTR_REG_OFFSET + 56(sp)
	lw	t8, KINTR_REG_OFFSET + 60(sp)
	lw	t9, KINTR_REG_OFFSET + 64(sp)
	lw	ra, KINTR_REG_OFFSET + 68(sp)
	addu	sp, sp, KINTR_FRAME_SIZE
	j	k0				# Now return from the
	rfe					#  interrupt.
	.set	at
END(MachKernIntr)

/*----------------------------------------------------------------------------
 *
 * MachUserIntr --
 *
 *	Handle an interrupt from user mode.
 *	Note: we save minimal state in the u.u_pcb struct and use the standard
 *	kernel stack since there has to be a u page if we came from user mode.
 *	If there is a pending software interrupt, then save the remaining state
 *	and call softintr().  This is all because if we call switch() inside
 *	interrupt(), not all the user registers have been saved in u.u_pcb.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
NNON_LEAF(MachUserIntr, STAND_FRAME_SIZE, ra)
	.set	noat
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
/*
 * Save the relevant user registers into the u.u_pcb struct.
 * We don't need to save s0 - s8 because
 * the compiler does it for us.
 */
	sw	AT, UADDR+U_PCB_REGS+(AST * 4)
	sw	v0, UADDR+U_PCB_REGS+(V0 * 4)
	sw	v1, UADDR+U_PCB_REGS+(V1 * 4)
	sw	a0, UADDR+U_PCB_REGS+(A0 * 4)
	mflo	v0
	mfhi	v1
	sw	a1, UADDR+U_PCB_REGS+(A1 * 4)
	sw	a2, UADDR+U_PCB_REGS+(A2 * 4)
	sw	a3, UADDR+U_PCB_REGS+(A3 * 4)
	sw	t0, UADDR+U_PCB_REGS+(T0 * 4)
	mfc0	a0, MACH_COP_0_STATUS_REG	# First arg is the status reg.
	sw	t1, UADDR+U_PCB_REGS+(T1 * 4)
	sw	t2, UADDR+U_PCB_REGS+(T2 * 4)
	sw	t3, UADDR+U_PCB_REGS+(T3 * 4)
	sw	t4, UADDR+U_PCB_REGS+(T4 * 4)
	mfc0	a1, MACH_COP_0_CAUSE_REG	# Second arg is the cause reg.
	sw	t5, UADDR+U_PCB_REGS+(T5 * 4)
	sw	t6, UADDR+U_PCB_REGS+(T6 * 4)
	sw	t7, UADDR+U_PCB_REGS+(T7 * 4)
	sw	t8, UADDR+U_PCB_REGS+(T8 * 4)
	mfc0	a2, MACH_COP_0_EXC_PC		# Third arg is the pc.
	sw	t9, UADDR+U_PCB_REGS+(T9 * 4)
	sw	gp, UADDR+U_PCB_REGS+(GP * 4)
	sw	sp, UADDR+U_PCB_REGS+(SP * 4)
	sw	ra, UADDR+U_PCB_REGS+(RA * 4)
	li	sp, KERNELSTACK - STAND_FRAME_SIZE	# switch to kernel SP
	sw	v0, UADDR+U_PCB_REGS+(MULLO * 4)
	sw	v1, UADDR+U_PCB_REGS+(MULHI * 4)
	sw	a0, UADDR+U_PCB_REGS+(SR * 4)
	sw	a2, UADDR+U_PCB_REGS+(PC * 4)
	# la	gp, _gp				# switch to kernel GP
	.set	at
	and	t0, a0, ~MACH_SR_COP_1_BIT	# Turn off the FPU.
	.set	noat
	mtc0	t0, MACH_COP_0_STATUS_REG
/*
 * Call the interrupt handler: interrupt(statusReg, causeReg, pc).
 */
	jal	interrupt
	sw	a2, STAND_RA_OFFSET(sp)		# (delay slot) for debugging
/*
 * Restore registers and return from the interrupt.
 */
	lw	a0, UADDR+U_PCB_REGS+(SR * 4)
	lw	v0, astpending			# any pending interrupts?
	mtc0	a0, MACH_COP_0_STATUS_REG	# Restore the SR, disable intrs
	bne	v0, zero, 1f			# dont restore, call softintr
	lw	t0, UADDR+U_PCB_REGS+(MULLO * 4)	# (delay slot)
	lw	t1, UADDR+U_PCB_REGS+(MULHI * 4)
	lw	k0, UADDR+U_PCB_REGS+(PC * 4)	# k0 = user PC to resume at
	lw	AT, UADDR+U_PCB_REGS+(AST * 4)
	lw	v0, UADDR+U_PCB_REGS+(V0 * 4)
	lw	v1, UADDR+U_PCB_REGS+(V1 * 4)
	lw	a0, UADDR+U_PCB_REGS+(A0 * 4)
	lw	a1, UADDR+U_PCB_REGS+(A1 * 4)
	lw	a2, UADDR+U_PCB_REGS+(A2 * 4)
	lw	a3, UADDR+U_PCB_REGS+(A3 * 4)
	mtlo	t0
	mthi	t1
	lw	t0, UADDR+U_PCB_REGS+(T0 * 4)
	lw	t1, UADDR+U_PCB_REGS+(T1 * 4)
	lw	t2, UADDR+U_PCB_REGS+(T2 * 4)
	lw	t3, UADDR+U_PCB_REGS+(T3 * 4)
	lw	t4, UADDR+U_PCB_REGS+(T4 * 4)
	lw	t5, UADDR+U_PCB_REGS+(T5 * 4)
	lw	t6, UADDR+U_PCB_REGS+(T6 * 4)
	lw	t7, UADDR+U_PCB_REGS+(T7 * 4)
	lw	t8, UADDR+U_PCB_REGS+(T8 * 4)
	lw	t9, UADDR+U_PCB_REGS+(T9 * 4)
	lw	gp, UADDR+U_PCB_REGS+(GP * 4)
	lw	sp, UADDR+U_PCB_REGS+(SP * 4)
	lw	ra, UADDR+U_PCB_REGS+(RA * 4)
	j	k0				# Now return from the
	rfe					#  interrupt.

1:
/*
 * We have pending software interrupts; save remaining user state in u.u_pcb.
 */
	sw	s0, UADDR+U_PCB_REGS+(S0 * 4)
	sw	s1, UADDR+U_PCB_REGS+(S1 * 4)
	sw	s2, UADDR+U_PCB_REGS+(S2 * 4)
	sw	s3, UADDR+U_PCB_REGS+(S3 * 4)
	sw	s4, UADDR+U_PCB_REGS+(S4 * 4)
	sw	s5, UADDR+U_PCB_REGS+(S5 * 4)
	sw	s6, UADDR+U_PCB_REGS+(S6 * 4)
	sw	s7, UADDR+U_PCB_REGS+(S7 * 4)
	sw	s8, UADDR+U_PCB_REGS+(S8 * 4)
	li	t0, MACH_HARD_INT_MASK | MACH_SR_INT_ENA_CUR
/*
 * Call the software interrupt handler.
 */
	jal	softintr
	mtc0	t0, MACH_COP_0_STATUS_REG	# (delay slot) enable interrupts (spl0)
/*
 * Restore user registers and return.  NOTE: interrupts are enabled.
 */
	lw	a0, UADDR+U_PCB_REGS+(SR * 4)
	lw	t0, UADDR+U_PCB_REGS+(MULLO * 4)
	lw	t1, UADDR+U_PCB_REGS+(MULHI * 4)
	mtc0	a0, MACH_COP_0_STATUS_REG	# this should disable interrupts
	mtlo	t0
	mthi	t1
	lw	k0, UADDR+U_PCB_REGS+(PC * 4)
	lw	AT, UADDR+U_PCB_REGS+(AST * 4)
	lw	v0, UADDR+U_PCB_REGS+(V0 * 4)
	lw	v1, UADDR+U_PCB_REGS+(V1 * 4)
	lw	a0, UADDR+U_PCB_REGS+(A0 * 4)
	lw	a1, UADDR+U_PCB_REGS+(A1 * 4)
	lw	a2, UADDR+U_PCB_REGS+(A2 * 4)
	lw	a3, UADDR+U_PCB_REGS+(A3 * 4)
	lw	t0, UADDR+U_PCB_REGS+(T0 * 4)
	lw	t1, UADDR+U_PCB_REGS+(T1 * 4)
	lw	t2, UADDR+U_PCB_REGS+(T2 * 4)
	lw	t3, UADDR+U_PCB_REGS+(T3 * 4)
	lw	t4, UADDR+U_PCB_REGS+(T4 * 4)
	lw	t5, UADDR+U_PCB_REGS+(T5 * 4)
	lw	t6, UADDR+U_PCB_REGS+(T6 * 4)
	lw	t7, UADDR+U_PCB_REGS+(T7 * 4)
	lw	s0, UADDR+U_PCB_REGS+(S0 * 4)
	lw	s1, UADDR+U_PCB_REGS+(S1 * 4)
	lw	s2, UADDR+U_PCB_REGS+(S2 * 4)
	lw	s3, UADDR+U_PCB_REGS+(S3 * 4)
	lw	s4, UADDR+U_PCB_REGS+(S4 * 4)
	lw	s5, UADDR+U_PCB_REGS+(S5 * 4)
	lw	s6, UADDR+U_PCB_REGS+(S6 * 4)
	lw	s7, UADDR+U_PCB_REGS+(S7 * 4)
	lw	t8, UADDR+U_PCB_REGS+(T8 * 4)
	lw	t9, UADDR+U_PCB_REGS+(T9 * 4)
	lw	gp, UADDR+U_PCB_REGS+(GP * 4)
	lw	sp, UADDR+U_PCB_REGS+(SP * 4)
	lw	s8, UADDR+U_PCB_REGS+(S8 * 4)
	lw	ra, UADDR+U_PCB_REGS+(RA * 4)
	j	k0
	rfe					# (delay slot) back to user mode
	.set	at
END(MachUserIntr)

#if 0
/*----------------------------------------------------------------------------
 *
 * MachTLBModException --
 *
 *	Handle a TLB modified exception.
 *	The BadVAddr, Context, and EntryHi registers contain the failed
 *	virtual address.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
NLEAF(MachTLBModException)
	.set	noat
	tlbp					# find the TLB entry
	mfc0	k0, MACH_COP_0_TLB_LOW		# get the physical address
	mfc0	k1, MACH_COP_0_TLB_INDEX	# check to be sure its valid
	or	k0, k0, VMMACH_TLB_MOD_BIT	# update TLB
	blt	k1, zero, 4f			# not found!!!
	mtc0	k0, MACH_COP_0_TLB_LOW		# (delay slot) stage updated entry
	li	k1, MACH_CACHED_MEMORY_ADDR
	subu	k0, k0, k1			# phys page frame of the entry
	srl	k0, k0, VMMACH_TLB_PHYS_PAGE_SHIFT
	la	k1, pmap_attributes
	addu	k0, k0, k1			# index into attribute array
	lbu	k1, 0(k0)			# fetch old value
	nop					# load delay before using k1
	or	k1, k1, 1			# set modified bit
	sb	k1, 0(k0)			# save new value
	mfc0	k0, MACH_COP_0_EXC_PC		# get return address
	nop
	j	k0
	rfe
4:
	break	0				# panic
	.set	at
END(MachTLBModException)
#endif

/*----------------------------------------------------------------------------
 *
 * MachTLBMissException --
 *
 *	Handle a TLB miss exception from kernel mode.
 *	The BadVAddr, Context, and EntryHi registers contain the failed
 *	virtual address.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
NLEAF(MachTLBMissException)
	.set	noat
	mfc0	k0, MACH_COP_0_BAD_VADDR	# get the fault address
	li	k1, VM_MIN_KERNEL_ADDRESS	# compute index
	subu	k0, k0, k1
	lw	k1, Sysmapsize			# index within range?
	srl	k0, k0, PGSHIFT
	sltu	k1, k0, k1
	beq	k1, zero, 1f			# No.  check for valid stack
	nop
	lw	k1, Sysmap
	sll	k0, k0, 2			# compute offset from index
	addu	k1, k1, k0
	lw	k0, 0(k1)			# get PTE entry
	mfc0	k1, MACH_COP_0_EXC_PC		# get return address
	mtc0	k0, MACH_COP_0_TLB_LOW		# save PTE entry
	and	k0, k0, PG_V			# check for valid entry
	beq	k0, zero, MachKernGenException	# PTE invalid
	nop
	tlbwr					# update TLB
	j	k1
	rfe

1:
	subu	k0, sp, UADDR + 0x200		# check to see if we have a
	sltiu	k0, UPAGES*NBPG - 0x200		#  valid kernel stack
	bne	k0, zero, MachKernGenException	# Go panic
	nop

	la	a0, start - START_FRAME - 8	# set sp to a valid place
	sw	sp, 24(a0)
	move	sp, a0
	la	a0, 1f				# printf format string below
	mfc0	a2, MACH_COP_0_STATUS_REG
	mfc0	a3, MACH_COP_0_CAUSE_REG
	mfc0	a1, MACH_COP_0_EXC_PC
	sw	a2, 16(sp)			# SR, CR, SP go on the stack as
	sw	a3, 20(sp)			#  the 5th-7th printf args
	sw	sp, 24(sp)
	move	a2, ra
	jal	printf
	mfc0	a3, MACH_COP_0_BAD_VADDR	# (delay slot) 4th arg: fault addr
	.data
1:
	.asciiz	"ktlbmiss: PC %x RA %x ADR %x\nSR %x CR %x SP %x\n"
	.text

	la	sp, start - START_FRAME		# set sp to a valid place
	PANIC("kernel stack overflow")
	.set	at
END(MachTLBMissException)

/*
 * Set/clear software interrupt routines.
 */

/*
 * setsoftclock -- request a soft clock interrupt by setting
 * MACH_SOFT_INT_MASK_0 in the cause register.  Clobbers v0.
 */
LEAF(setsoftclock)
	mfc0	v0, MACH_COP_0_CAUSE_REG	# read cause register
	nop					# CP0 read hazard
	or	v0, v0, MACH_SOFT_INT_MASK_0	# set soft clock interrupt
	mtc0	v0, MACH_COP_0_CAUSE_REG	# save it
	j	ra
	nop
END(setsoftclock)

/*
 * clearsoftclock -- acknowledge the soft clock interrupt by clearing
 * MACH_SOFT_INT_MASK_0 in the cause register.  Clobbers v0.
 */
LEAF(clearsoftclock)
	mfc0	v0, MACH_COP_0_CAUSE_REG	# read cause register
	nop					# CP0 read hazard
	and	v0, v0, ~MACH_SOFT_INT_MASK_0	# clear soft clock interrupt
	mtc0	v0, MACH_COP_0_CAUSE_REG	# save it
	j	ra
	nop
END(clearsoftclock)

/*
 * setsoftnet -- request a soft network interrupt by setting
 * MACH_SOFT_INT_MASK_1 in the cause register.  Clobbers v0.
 */
LEAF(setsoftnet)
	mfc0	v0, MACH_COP_0_CAUSE_REG	# read cause register
	nop					# CP0 read hazard
	or	v0, v0, MACH_SOFT_INT_MASK_1	# set soft net interrupt
	mtc0	v0, MACH_COP_0_CAUSE_REG	# save it
	j	ra
	nop
END(setsoftnet)

/*
 * clearsoftnet -- acknowledge the soft network interrupt by clearing
 * MACH_SOFT_INT_MASK_1 in the cause register.  Clobbers v0.
 */
LEAF(clearsoftnet)
	mfc0	v0, MACH_COP_0_CAUSE_REG	# read cause register
	nop					# CP0 read hazard
	and	v0, v0, ~MACH_SOFT_INT_MASK_1	# clear soft net interrupt
	mtc0	v0, MACH_COP_0_CAUSE_REG	# save it
	j	ra
	nop
END(clearsoftnet)

/*
 * Set/change interrupt priority routines.
 */

/*
 * MachEnableIntr -- set the current-interrupt-enable bit in the status
 * register.  Clobbers v0.
 */
LEAF(MachEnableIntr)
	mfc0	v0, MACH_COP_0_STATUS_REG	# read status register
	nop					# CP0 read hazard
	or	v0, v0, MACH_SR_INT_ENA_CUR
	mtc0	v0, MACH_COP_0_STATUS_REG	# enable all interrupts
	j	ra
	nop
END(MachEnableIntr)

/*
 * spl0 -- enable all interrupts.
 * Returns the previous (interrupt mask | enable) bits in v0 so the
 * caller can splx() back.
 */
LEAF(spl0)
	mfc0	v0, MACH_COP_0_STATUS_REG	# read status register
	nop					# CP0 read hazard
	or	t0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR)
	mtc0	t0, MACH_COP_0_STATUS_REG	# enable all interrupts
	j	ra
	and	v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR)	# (delay slot) old mask
END(spl0)

/*
 * splsoftclock -- block soft clock interrupts; returns previous mask in v0.
 */
LEAF(splsoftclock)
	mfc0	v0, MACH_COP_0_STATUS_REG	# read status register
	li	t0, ~MACH_SOFT_INT_MASK_0	# disable soft clock
	and	t0, t0, v0
	mtc0	t0, MACH_COP_0_STATUS_REG	# save it
	j	ra
	and	v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR)	# (delay slot) old mask
END(splsoftclock)

/*
 * Mach_spl0 -- block hardware interrupt level 0 plus both soft
 * interrupts; returns previous mask in v0.
 */
LEAF(Mach_spl0)
	mfc0	v0, MACH_COP_0_STATUS_REG	# read status register
	li	t0, ~(MACH_INT_MASK_0|MACH_SOFT_INT_MASK_1|MACH_SOFT_INT_MASK_0)
	and	t0, t0, v0
	mtc0	t0, MACH_COP_0_STATUS_REG	# save it
	j	ra
	and	v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR)	# (delay slot) old mask
END(Mach_spl0)

/*
 * Mach_spl1 -- block hardware interrupt level 1 plus both soft
 * interrupts; returns previous mask in v0.
 */
LEAF(Mach_spl1)
	mfc0	v0, MACH_COP_0_STATUS_REG	# read status register
	li	t0, ~(MACH_INT_MASK_1|MACH_SOFT_INT_MASK_0|MACH_SOFT_INT_MASK_1)
	and	t0, t0, v0
	mtc0	t0, MACH_COP_0_STATUS_REG	# save it
	j	ra
	and	v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR)	# (delay slot) old mask
END(Mach_spl1)

/*
 * Mach_spl2 -- block hardware interrupt level 2 plus both soft
 * interrupts; returns previous mask in v0.
 */
LEAF(Mach_spl2)
	mfc0	v0, MACH_COP_0_STATUS_REG	# read status register
	li	t0, ~(MACH_INT_MASK_2|MACH_SOFT_INT_MASK_1|MACH_SOFT_INT_MASK_0)
	and	t0, t0, v0
	mtc0	t0, MACH_COP_0_STATUS_REG	# save it
	j	ra
	and	v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR)	# (delay slot) old mask
END(Mach_spl2)

/*
 * Mach_spl3 -- block hardware interrupt level 3 plus both soft
 * interrupts; returns previous mask in v0.
 */
LEAF(Mach_spl3)
	mfc0	v0, MACH_COP_0_STATUS_REG	# read status register
	li	t0, ~(MACH_INT_MASK_3|MACH_SOFT_INT_MASK_1|MACH_SOFT_INT_MASK_0)
	and	t0, t0, v0
	mtc0	t0, MACH_COP_0_STATUS_REG	# save it
	j	ra
	and	v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR)	# (delay slot) old mask
END(Mach_spl3)

/*
 * splhigh -- disable all interrupts; returns previous mask in v0.
 * We define an alternate entry point after mcount is called so it
 * can be used in mcount without causing a recursive loop.
 */
LEAF(splhigh)
ALEAF(_splhigh)
	mfc0	v0, MACH_COP_0_STATUS_REG	# read status register
	li	t0, ~MACH_SR_INT_ENA_CUR	# disable all interrupts
	and	t0, t0, v0
	mtc0	t0, MACH_COP_0_STATUS_REG	# save it
	j	ra
	and	v0, v0, (MACH_INT_MASK | MACH_SR_INT_ENA_CUR)	# (delay slot) old mask
END(splhigh)

/*
 * Restore saved interrupt mask (a0 = value returned by a spl routine).
 */
LEAF(splx)
ALEAF(_splx)
	mfc0	v0, MACH_COP_0_STATUS_REG
	li	t0, ~(MACH_INT_MASK | MACH_SR_INT_ENA_CUR)
	and	t0, t0, v0			# keep non-mask SR bits
	or	t0, t0, a0			# merge in saved mask
	mtc0	t0, MACH_COP_0_STATUS_REG
	j	ra
	nop
END(splx)

/*----------------------------------------------------------------------------
 *
 * MachEmptyWriteBuffer --
 *
 *	Return when the write buffer is empty.
 *
 *	MachEmptyWriteBuffer()
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
LEAF(MachEmptyWriteBuffer)
	nop
	nop
	nop
	nop
1:	bc0f	1b				# spin while CP0 condition is false
						#  (presumably write buffer busy --
						#   hardware-specific; confirm)
	nop
	j	ra
	nop
END(MachEmptyWriteBuffer)

/*--------------------------------------------------------------------------
 *
 * MachTLBWriteIndexed --
 *
 *	Write the given entry into the TLB at the given index.
 *
 *	MachTLBWriteIndexed(index, highEntry, lowEntry)
 *		int index;
 *		int highEntry;
 *		int lowEntry;
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	TLB entry set.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MachTLBWriteIndexed)
	mfc0	v1, MACH_COP_0_STATUS_REG	# Save the status register.
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts
	mfc0	t0, MACH_COP_0_TLB_HI		# Save the current PID.

	sll	a0, a0, VMMACH_TLB_INDEX_SHIFT	# index -> index-register format
	mtc0	a0, MACH_COP_0_TLB_INDEX	# Set the index.
	mtc0	a1, MACH_COP_0_TLB_HI		# Set up entry high.
	mtc0	a2, MACH_COP_0_TLB_LOW		# Set up entry low.
	nop					# CP0 write hazard before tlbwi
	tlbwi					# Write the TLB

	mtc0	t0, MACH_COP_0_TLB_HI		# Restore the PID.
	j	ra
	mtc0	v1, MACH_COP_0_STATUS_REG	# (delay slot) Restore the status register
END(MachTLBWriteIndexed)

#if 0
/*--------------------------------------------------------------------------
 *
 * MachTLBWriteRandom --
 *
 *	Write the given entry into the TLB at a random location.
 *
 *	MachTLBWriteRandom(highEntry, lowEntry)
 *		unsigned highEntry;
 *		unsigned lowEntry;
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	TLB entry set.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MachTLBWriteRandom)
	mfc0	v1, MACH_COP_0_STATUS_REG	# Save the status register.
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts
	mfc0	v0, MACH_COP_0_TLB_HI		# Save the current PID.
	nop

	mtc0	a0, MACH_COP_0_TLB_HI		# Set up entry high.
	mtc0	a1, MACH_COP_0_TLB_LOW		# Set up entry low.
	nop					# CP0 write hazard before tlbwr
	tlbwr					# Write the TLB

	mtc0	v0, MACH_COP_0_TLB_HI		# Restore the PID.
	j	ra
	mtc0	v1, MACH_COP_0_STATUS_REG	# (delay slot) Restore the status register
END(MachTLBWriteRandom)
#endif

/*--------------------------------------------------------------------------
 *
 * MachSetPID --
 *
 *	Write the given pid into the TLB pid reg.
 *
 *	MachSetPID(pid)
 *		int pid;
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	PID set in the entry hi register.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MachSetPID)
	sll	a0, a0, VMMACH_TLB_PID_SHIFT	# put PID in right spot
	mtc0	a0, MACH_COP_0_TLB_HI		# Write the hi reg value
	j	ra
	nop
END(MachSetPID)

/*--------------------------------------------------------------------------
 *
 * MachTLBFlush --
 *
 *	Flush the "random" entries from the TLB.
 *
 *	MachTLBFlush()
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The TLB is flushed.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MachTLBFlush)
	mfc0	v1, MACH_COP_0_STATUS_REG	# Save the status register.
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts
	mfc0	t0, MACH_COP_0_TLB_HI		# Save the PID
	li	t1, MACH_CACHED_MEMORY_ADDR	# invalid address
	mtc0	t1, MACH_COP_0_TLB_HI		# Mark entry high as invalid
	mtc0	zero, MACH_COP_0_TLB_LOW	# Zero out low entry.
/*
 * Align the starting value (t1) and the upper bound (t2)
 * to index-register format.
 */
	li	t1, VMMACH_FIRST_RAND_ENTRY << VMMACH_TLB_INDEX_SHIFT
	li	t2, VMMACH_NUM_TLB_ENTRIES << VMMACH_TLB_INDEX_SHIFT
1:
	mtc0	t1, MACH_COP_0_TLB_INDEX	# Set the index register.
	addu	t1, t1, 1 << VMMACH_TLB_INDEX_SHIFT	# Increment index.
	bne	t1, t2, 1b
	tlbwi					# (delay slot) Write the TLB entry.

	mtc0	t0, MACH_COP_0_TLB_HI		# Restore the PID
	j	ra
	mtc0	v1, MACH_COP_0_STATUS_REG	# (delay slot) Restore the status register
END(MachTLBFlush)

#if 0
/*--------------------------------------------------------------------------
 *
 * MachTLBFlushPID --
 *
 *	Flush all entries with the given PID from the TLB.
 *
 *	MachTLBFlushPID(pid)
 *		int pid;
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	All entries corresponding to this PID are flushed.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MachTLBFlushPID)
	mfc0	v1, MACH_COP_0_STATUS_REG	# Save the status register.
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts
	mfc0	t0, MACH_COP_0_TLB_HI		# Save the current PID
	sll	a0, a0, VMMACH_TLB_PID_SHIFT	# Align the pid to flush.
/*
 * Align the starting value (t1) and the upper bound (t2)
 * to index-register format.
 */
	li	t1, VMMACH_FIRST_RAND_ENTRY << VMMACH_TLB_INDEX_SHIFT
	li	t2, VMMACH_NUM_TLB_ENTRIES << VMMACH_TLB_INDEX_SHIFT
	mtc0	t1, MACH_COP_0_TLB_INDEX	# Set the index register
1:
	addu	t1, t1, 1 << VMMACH_TLB_INDEX_SHIFT	# Increment index.
	tlbr					# Read from the TLB
	mfc0	t4, MACH_COP_0_TLB_HI		# Fetch the hi register.
	nop					# CP0 read hazard
	and	t4, t4, VMMACH_TLB_PID		# compare PIDs
	bne	t4, a0, 2f			# not this PID; skip invalidate
	li	v0, MACH_CACHED_MEMORY_ADDR	# (delay slot) invalid address
	mtc0	v0, MACH_COP_0_TLB_HI		# Mark entry high as invalid
	mtc0	zero, MACH_COP_0_TLB_LOW	# Zero out low entry.
	nop					# CP0 write hazard before tlbwi
	tlbwi					# Write the entry.
2:
	bne	t1, t2, 1b
	mtc0	t1, MACH_COP_0_TLB_INDEX	# (delay slot) Set the index register

	mtc0	t0, MACH_COP_0_TLB_HI		# restore PID
	j	ra
	mtc0	v1, MACH_COP_0_STATUS_REG	# (delay slot) Restore the status register
END(MachTLBFlushPID)
#endif

/*--------------------------------------------------------------------------
 *
 * MachTLBFlushAddr --
 *
 *	Flush any TLB entries for the given address and TLB PID.
 *
 *	MachTLBFlushAddr(highreg)
 *		unsigned highreg;
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The process's page is flushed from the TLB.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MachTLBFlushAddr)
	mfc0	v1, MACH_COP_0_STATUS_REG	# Save the status register.
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts
	mfc0	t0, MACH_COP_0_TLB_HI		# Get current PID
	nop

	mtc0	a0, MACH_COP_0_TLB_HI		# look for addr & PID
	nop					# CP0 write hazard before tlbp
	tlbp					# Probe for the entry.
	mfc0	v0, MACH_COP_0_TLB_INDEX	# See what we got
	li	t1, MACH_CACHED_MEMORY_ADDR	# Load invalid entry.
	bltz	v0, 1f				# index < 0 => !found
	mtc0	t1, MACH_COP_0_TLB_HI		# (delay slot) Mark entry high as invalid
	mtc0	zero, MACH_COP_0_TLB_LOW	# Zero out low entry.
	nop					# CP0 write hazard before tlbwi
	tlbwi
1:
	mtc0	t0, MACH_COP_0_TLB_HI		# restore PID
	j	ra
	mtc0	v1, MACH_COP_0_STATUS_REG	# (delay slot) Restore the status register
END(MachTLBFlushAddr)

/*--------------------------------------------------------------------------
 *
 * MachTLBUpdate --
 *
 *	Update the TLB if highreg is found; otherwise, enter the data.
 *
 *	MachTLBUpdate(highreg, lowreg)
 *		unsigned highreg, lowreg;
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MachTLBUpdate)
	mfc0	v1, MACH_COP_0_STATUS_REG	# Save the status register.
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts
	mfc0	t0, MACH_COP_0_TLB_HI		# Save current PID
	nop					# 2 cycles before intr disabled
	mtc0	a0, MACH_COP_0_TLB_HI		# init high reg.
	nop					# CP0 write hazard before tlbp
	tlbp					# Probe for the entry.
	mfc0	v0, MACH_COP_0_TLB_INDEX	# See what we got
	mtc0	a1, MACH_COP_0_TLB_LOW		# init low reg.
	bltz	v0, 1f				# index < 0 => !found
	sra	v0, v0, VMMACH_TLB_INDEX_SHIFT	# (delay slot) convert index to regular num
	b	2f
	tlbwi					# (delay slot) update slot found
1:
	mtc0	a0, MACH_COP_0_TLB_HI		# init high reg.
	nop					# CP0 write hazard before tlbwr
	tlbwr					# enter into a random slot
2:
	mtc0	t0, MACH_COP_0_TLB_HI		# restore PID
	j	ra
	mtc0	v1, MACH_COP_0_STATUS_REG	# (delay slot) Restore the status register
END(MachTLBUpdate)

#if defined(DEBUG)
/*--------------------------------------------------------------------------
 *
 * MachTLBFind --
 *
 *	Search the TLB for the given entry.
 *
 *	MachTLBFind(hi)
 *		unsigned hi;
 *
 * Results:
 *	Returns a value >= 0 if the entry was found (the index).
 *	Returns a value < 0 if the entry was not found.
 *
 * Side effects:
 *	tlbhi and tlblo will contain the TLB entry found.
 *
 *--------------------------------------------------------------------------
 */
	.comm	tlbhi, 4
	.comm	tlblo, 4
LEAF(MachTLBFind)
	mfc0	v1, MACH_COP_0_STATUS_REG	# Save the status register.
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts
	mfc0	t0, MACH_COP_0_TLB_HI		# Get current PID
	nop
	mtc0	a0, MACH_COP_0_TLB_HI		# Set up entry high.
	nop					# CP0 write hazard before tlbp
	tlbp					# Probe for the entry.
	mfc0	v0, MACH_COP_0_TLB_INDEX	# See what we got
	nop
	bltz	v0, 1f				# not found
	nop
	tlbr					# read TLB
	mfc0	t1, MACH_COP_0_TLB_HI		# See what we got
	mfc0	t2, MACH_COP_0_TLB_LOW		# See what we got
	sw	t1, tlbhi
	sw	t2, tlblo
	srl	v0, v0, VMMACH_TLB_INDEX_SHIFT	# convert index to regular num
1:
	mtc0	t0, MACH_COP_0_TLB_HI		# Restore current PID
	j	ra
	mtc0	v1, MACH_COP_0_STATUS_REG	# (delay slot) Restore the status register
END(MachTLBFind)

/*--------------------------------------------------------------------------
 *
 * MachTLBRead --
 *
 *	Read the TLB entry.
 *
 *	MachTLBRead(entry)
 *		unsigned entry;
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	tlbhi and tlblo will contain the TLB entry found.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MachTLBRead)
	mfc0	v1, MACH_COP_0_STATUS_REG	# Save the status register.
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts
	mfc0	t0, MACH_COP_0_TLB_HI		# Get current PID

	sll	a0, a0, VMMACH_TLB_INDEX_SHIFT	# entry -> index-register format
	mtc0	a0, MACH_COP_0_TLB_INDEX	# Set the index register
	nop					# CP0 write hazard before tlbr
	tlbr					# Read from the TLB
	mfc0	t3, MACH_COP_0_TLB_HI		# fetch the hi entry
	mfc0	t4, MACH_COP_0_TLB_LOW		# fetch the low entry
	sw	t3, tlbhi
	sw	t4, tlblo

	mtc0	t0, MACH_COP_0_TLB_HI		# restore PID
	j	ra
	mtc0	v1, MACH_COP_0_STATUS_REG	# (delay slot) Restore the status register
END(MachTLBRead)

/*--------------------------------------------------------------------------
 *
 * MachTLBGetPID --
 *
 *	MachTLBGetPID()
 *
 * Results:
 *	Returns the current TLB pid reg.
 *
 * Side effects:
 *	None.
 *
 *--------------------------------------------------------------------------
 */
LEAF(MachTLBGetPID)
	mfc0	v0, MACH_COP_0_TLB_HI		# get PID
	nop					# CP0 read hazard
	and	v0, v0, VMMACH_TLB_PID		# mask off PID
	j	ra
	srl	v0, v0, VMMACH_TLB_PID_SHIFT	# (delay slot) put PID in right spot
END(MachTLBGetPID)

/*
 * Return the current value of the cause register.
 */
LEAF(MachGetCauseReg)
	mfc0	v0, MACH_COP_0_CAUSE_REG
	j	ra
	nop
END(MachGetCauseReg)
#endif /* DEBUG */

/*----------------------------------------------------------------------------
 *
 * MachSwitchFPState --
 *
 *	Save the current state into 'from' and restore it from 'to'.
 *
 *	MachSwitchFPState(from, to)
 *		struct proc *from;
 *		struct user *to;
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
LEAF(MachSwitchFPState)
	mfc0	t1, MACH_COP_0_STATUS_REG	# Save old SR
	li	t0, MACH_SR_COP_1_BIT		# enable the coprocessor
	mtc0	t0, MACH_COP_0_STATUS_REG

	beq	a0, zero, 1f			# skip save if NULL pointer
	nop
/*
 * First read out the status register to make sure that all FP operations
 * have completed.
 */
	lw	a0, P_ADDR(a0)			# get pointer to pcb for proc
	cfc1	t0, MACH_FPC_CSR		# stall til FP done
	cfc1	t0, MACH_FPC_CSR		# now get status
	li	t3, ~MACH_SR_COP_1_BIT
	lw	t2, U_PCB_REGS+(PS * 4)(a0)	# get CPU status register
	sw	t0, U_PCB_FPREGS+(32 * 4)(a0)	# save FP status
	and	t2, t2, t3			# clear COP_1 enable bit
	sw	t2, U_PCB_REGS+(PS * 4)(a0)	# save new status register
/*
 * Save the floating point registers.
 */
	swc1	$f0, U_PCB_FPREGS+(0 * 4)(a0)
	swc1	$f1, U_PCB_FPREGS+(1 * 4)(a0)
	swc1	$f2, U_PCB_FPREGS+(2 * 4)(a0)
	swc1	$f3, U_PCB_FPREGS+(3 * 4)(a0)
	swc1	$f4, U_PCB_FPREGS+(4 * 4)(a0)
	swc1	$f5, U_PCB_FPREGS+(5 * 4)(a0)
	swc1	$f6, U_PCB_FPREGS+(6 * 4)(a0)
	swc1	$f7, U_PCB_FPREGS+(7 * 4)(a0)
	swc1	$f8, U_PCB_FPREGS+(8 * 4)(a0)
	swc1	$f9, U_PCB_FPREGS+(9 * 4)(a0)
	swc1	$f10, U_PCB_FPREGS+(10 * 4)(a0)
	swc1	$f11, U_PCB_FPREGS+(11 * 4)(a0)
	swc1	$f12, U_PCB_FPREGS+(12 * 4)(a0)
	swc1	$f13, U_PCB_FPREGS+(13 * 4)(a0)
	swc1	$f14, U_PCB_FPREGS+(14 * 4)(a0)
	swc1	$f15, U_PCB_FPREGS+(15 * 4)(a0)
	swc1	$f16, U_PCB_FPREGS+(16 * 4)(a0)
	swc1	$f17, U_PCB_FPREGS+(17 * 4)(a0)
	swc1	$f18, U_PCB_FPREGS+(18 * 4)(a0)
	swc1	$f19, U_PCB_FPREGS+(19 * 4)(a0)
	swc1	$f20, U_PCB_FPREGS+(20 * 4)(a0)
	swc1	$f21, U_PCB_FPREGS+(21 * 4)(a0)
	swc1	$f22, U_PCB_FPREGS+(22 * 4)(a0)
	swc1	$f23, U_PCB_FPREGS+(23 * 4)(a0)
	swc1	$f24, U_PCB_FPREGS+(24 * 4)(a0)
	swc1	$f25, U_PCB_FPREGS+(25 * 4)(a0)
	swc1	$f26, U_PCB_FPREGS+(26 * 4)(a0)
	swc1	$f27, U_PCB_FPREGS+(27 * 4)(a0)
	swc1	$f28, U_PCB_FPREGS+(28 * 4)(a0)
	swc1	$f29, U_PCB_FPREGS+(29 * 4)(a0)
	swc1	$f30, U_PCB_FPREGS+(30 * 4)(a0)
	swc1	$f31, U_PCB_FPREGS+(31 * 4)(a0)

1:
/*
 * Restore the floating point registers.
 */
	lw	t0, U_PCB_FPREGS+(32 * 4)(a1)	# get status register
	lwc1	$f0, U_PCB_FPREGS+(0 * 4)(a1)
	lwc1	$f1, U_PCB_FPREGS+(1 * 4)(a1)
	lwc1	$f2, U_PCB_FPREGS+(2 * 4)(a1)
	lwc1	$f3, U_PCB_FPREGS+(3 * 4)(a1)
	lwc1	$f4, U_PCB_FPREGS+(4 * 4)(a1)
	lwc1	$f5, U_PCB_FPREGS+(5 * 4)(a1)
	lwc1	$f6, U_PCB_FPREGS+(6 * 4)(a1)
	lwc1	$f7, U_PCB_FPREGS+(7 * 4)(a1)
	lwc1	$f8, U_PCB_FPREGS+(8 * 4)(a1)
	lwc1	$f9, U_PCB_FPREGS+(9 * 4)(a1)
	lwc1	$f10, U_PCB_FPREGS+(10 * 4)(a1)
	lwc1	$f11, U_PCB_FPREGS+(11 * 4)(a1)
	lwc1	$f12, U_PCB_FPREGS+(12 * 4)(a1)
	lwc1	$f13, U_PCB_FPREGS+(13 * 4)(a1)
	lwc1	$f14, U_PCB_FPREGS+(14 * 4)(a1)
	lwc1	$f15, U_PCB_FPREGS+(15 * 4)(a1)
	lwc1	$f16, U_PCB_FPREGS+(16 * 4)(a1)
	lwc1	$f17, U_PCB_FPREGS+(17 * 4)(a1)
	lwc1	$f18, U_PCB_FPREGS+(18 * 4)(a1)
	lwc1	$f19, U_PCB_FPREGS+(19 * 4)(a1)
	lwc1	$f20, U_PCB_FPREGS+(20 * 4)(a1)
	lwc1	$f21, U_PCB_FPREGS+(21 * 4)(a1)
	lwc1	$f22, U_PCB_FPREGS+(22 * 4)(a1)
	lwc1	$f23, U_PCB_FPREGS+(23 * 4)(a1)
	lwc1	$f24, U_PCB_FPREGS+(24 * 4)(a1)
	lwc1	$f25, U_PCB_FPREGS+(25 * 4)(a1)
	lwc1	$f26, U_PCB_FPREGS+(26 * 4)(a1)
	lwc1	$f27, U_PCB_FPREGS+(27 * 4)(a1)
	lwc1	$f28, U_PCB_FPREGS+(28 * 4)(a1)
	lwc1	$f29, U_PCB_FPREGS+(29 * 4)(a1)
	lwc1	$f30, U_PCB_FPREGS+(30 * 4)(a1)
	lwc1	$f31, U_PCB_FPREGS+(31 * 4)(a1)

	and	t0, t0, ~MACH_FPC_EXCEPTION_BITS	# don't re-raise old exceptions
	ctc1	t0, MACH_FPC_CSR
	nop

	mtc0	t1, MACH_COP_0_STATUS_REG	# Restore the status register.
	j	ra
	nop
END(MachSwitchFPState)

/*----------------------------------------------------------------------------
 *
 * MachSaveCurFPState --
 *
 *	Save the current floating point coprocessor state.
 *
 *	MachSaveCurFPState(p)
 *		struct proc *p;
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	machFPCurProcPtr is cleared.
 *
 *----------------------------------------------------------------------------
 */
LEAF(MachSaveCurFPState)
	lw	a0, P_ADDR(a0)			# get pointer to pcb for proc
	mfc0	t1, MACH_COP_0_STATUS_REG	# Disable interrupts and
	li	t0, MACH_SR_COP_1_BIT		#   enable the coprocessor
	mtc0	t0, MACH_COP_0_STATUS_REG
	sw	zero, machFPCurProcPtr		# indicate state has been saved
/*
 * First read out the status register to make sure that all FP operations
 * have completed.
 */
	lw	t2, U_PCB_REGS+(PS * 4)(a0)	# get CPU status register
	li	t3, ~MACH_SR_COP_1_BIT
	and	t2, t2, t3			# clear COP_1 enable bit
	cfc1	t0, MACH_FPC_CSR		# stall til FP done
	cfc1	t0, MACH_FPC_CSR		# now get status
	sw	t2, U_PCB_REGS+(PS * 4)(a0)	# save new status register
	sw	t0, U_PCB_FPREGS+(32 * 4)(a0)	# save FP status
/*
 * Save the floating point registers.
 */
	swc1	$f0, U_PCB_FPREGS+(0 * 4)(a0)
	swc1	$f1, U_PCB_FPREGS+(1 * 4)(a0)
	swc1	$f2, U_PCB_FPREGS+(2 * 4)(a0)
	swc1	$f3, U_PCB_FPREGS+(3 * 4)(a0)
	swc1	$f4, U_PCB_FPREGS+(4 * 4)(a0)
	swc1	$f5, U_PCB_FPREGS+(5 * 4)(a0)
	swc1	$f6, U_PCB_FPREGS+(6 * 4)(a0)
	swc1	$f7, U_PCB_FPREGS+(7 * 4)(a0)
	swc1	$f8, U_PCB_FPREGS+(8 * 4)(a0)
	swc1	$f9, U_PCB_FPREGS+(9 * 4)(a0)
	swc1	$f10, U_PCB_FPREGS+(10 * 4)(a0)
	swc1	$f11, U_PCB_FPREGS+(11 * 4)(a0)
	swc1	$f12, U_PCB_FPREGS+(12 * 4)(a0)
	swc1	$f13, U_PCB_FPREGS+(13 * 4)(a0)
	swc1	$f14, U_PCB_FPREGS+(14 * 4)(a0)
	swc1	$f15, U_PCB_FPREGS+(15 * 4)(a0)
	swc1	$f16, U_PCB_FPREGS+(16 * 4)(a0)
	swc1	$f17, U_PCB_FPREGS+(17 * 4)(a0)
	swc1	$f18, U_PCB_FPREGS+(18 * 4)(a0)
	swc1	$f19, U_PCB_FPREGS+(19 * 4)(a0)
	swc1	$f20, U_PCB_FPREGS+(20 * 4)(a0)
	swc1	$f21, U_PCB_FPREGS+(21 * 4)(a0)
	swc1	$f22, U_PCB_FPREGS+(22 * 4)(a0)
	swc1	$f23, U_PCB_FPREGS+(23 * 4)(a0)
	swc1	$f24, U_PCB_FPREGS+(24 * 4)(a0)
	swc1	$f25, U_PCB_FPREGS+(25 * 4)(a0)
	swc1	$f26, U_PCB_FPREGS+(26 * 4)(a0)
	swc1	$f27, U_PCB_FPREGS+(27 * 4)(a0)
	swc1	$f28, U_PCB_FPREGS+(28 * 4)(a0)
	swc1	$f29, U_PCB_FPREGS+(29 * 4)(a0)
	swc1	$f30, U_PCB_FPREGS+(30 * 4)(a0)
	swc1	$f31, U_PCB_FPREGS+(31 * 4)(a0)

	mtc0	t1, MACH_COP_0_STATUS_REG	# Restore the status register.
	j	ra
	nop
END(MachSaveCurFPState)

/*----------------------------------------------------------------------------
 *
 * MachFPInterrupt --
 *
 *	Handle a floating point interrupt.
 *
 *	MachFPInterrupt(statusReg, causeReg, pc)
 *		unsigned statusReg;
 *		unsigned causeReg;
 *		unsigned pc;
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
NON_LEAF(MachFPInterrupt, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	mfc0	t0, MACH_COP_0_STATUS_REG
	sw	ra, STAND_RA_OFFSET(sp)
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)

	or	t1, t0, MACH_SR_COP_1_BIT	# turn on the FPU so we can
	mtc0	t1, MACH_COP_0_STATUS_REG	#   read its control registers
	nop
	nop
	cfc1	t1, MACH_FPC_CSR		# stall til FP done
	cfc1	t1, MACH_FPC_CSR		# now get status
	nop
	sll	t2, t1, (31 - 17)		# unimplemented operation?
	bgez	t2, 3f				# no, normal trap
	nop
/*
 * We got an unimplemented operation trap so
 * fetch the instruction, compute the next PC and emulate the instruction.
 */
	bgez	a1, 1f				# Check the branch delay bit.
	nop
/*
 * The instruction is in the branch delay slot so the branch will have to
 * be emulated to get the resulting PC.
 */
	sw	a2, STAND_FRAME_SIZE + 8(sp)	# stash EXC pc across the call
	li	a0, UADDR+U_PCB_REGS		# first arg is ptr to CPU registers
	move	a1, a2				# second arg is instruction PC
	move	a2, t1				# third arg is floating point CSR
	jal	MachEmulateBranch		# compute PC after branch
	move	a3, zero			# (delay slot) fourth arg is FALSE
/*
 * Now load the floating-point instruction in the branch delay slot
 * to be emulated.
 */
	lw	a2, STAND_FRAME_SIZE + 8(sp)	# restore EXC pc
	b	2f
	lw	a0, 4(a2)			# (delay slot) a0 = coproc instruction
/*
 * This is not in the branch delay slot so calculate the resulting
 * PC (epc + 4) into v0 and continue to MachEmulateFP().
 */
1:
	lw	a0, 0(a2)			# a0 = coproc instruction
	addu	v0, a2, 4			# v0 = next pc
2:
	sw	v0, UADDR+U_PCB_REGS+(PC * 4)	# save new pc
/*
 * Check to see if the instruction to be emulated is a floating-point
 * instruction.
 */
	srl	a3, a0, MACH_OPCODE_SHIFT
	beq	a3, MACH_OPCODE_C1, 4f		# this should never fail
						# NOTE(review): under .set noreorder the
						# lw below is in this branch's delay slot
						# and clobbers a0 (the instruction) when
						# the branch is taken -- confirm against
						# MachEmulateFP's expected argument.
/*
 * Send a floating point exception signal to the current process.
 */
3:
	lw	a0, curproc			# get current process
	cfc1	a2, MACH_FPC_CSR		# code = FP exceptions
	ctc1	zero, MACH_FPC_CSR		# Clear exceptions
	jal	trapsignal
	li	a1, SIGFPE			# (delay slot) signal number
	b	FPReturn
	nop

/*
 * Finally, we can call MachEmulateFP() where a0 is the instruction to emulate.
 */
4:
	jal	MachEmulateFP
	nop

/*
 * Turn off the floating point coprocessor and return.
 */
FPReturn:
	mfc0	t0, MACH_COP_0_STATUS_REG
	lw	ra, STAND_RA_OFFSET(sp)
	and	t0, t0, ~MACH_SR_COP_1_BIT
	mtc0	t0, MACH_COP_0_STATUS_REG
	j	ra
	addu	sp, sp, STAND_FRAME_SIZE	# (delay slot) pop our frame
END(MachFPInterrupt)

/*----------------------------------------------------------------------------
 *
 * MachConfigCache --
 *
 *	Size the caches.
 *	NOTE: should only be called from mach_init().
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The size of the data cache is stored into machDataCacheSize and the
 *	size of instruction cache is stored into machInstCacheSize.
 *
 *----------------------------------------------------------------------------
 */
NON_LEAF(MachConfigCache, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	sw	ra, STAND_RA_OFFSET(sp)		# Save return address.
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts.
	la	v0, 1f
	or	v0, MACH_UNCACHED_MEMORY_ADDR	# Run uncached.
	j	v0
	nop
1:
/*
 * This works because jal doesn't change pc[31..28] and the
 * linker still thinks SizeCache is in the cached region so it computes
 * the correct address without complaining.
 */
	jal	SizeCache			# Get the size of the d-cache.
	nop					# (delay slot of jal above)
	sw	v0, machDataCacheSize
	nop					# Make sure sw out of pipe
	nop
	nop
	nop
	li	v0, MACH_SR_SWAP_CACHES		# Swap caches
	mtc0	v0, MACH_COP_0_STATUS_REG
	nop					# Insure caches stable
	nop
	nop
	nop
	jal	SizeCache			# Get the size of the i-cache.
	nop
	mtc0	zero, MACH_COP_0_STATUS_REG	# Swap back caches and enable.
	nop
	nop
	nop
	nop
	sw	v0, machInstCacheSize
	la	t0, 1f
	j	t0				# Back to cached mode
	nop
1:
	lw	ra, STAND_RA_OFFSET(sp)		# Restore return addr
	addu	sp, sp, STAND_FRAME_SIZE	# Restore sp.
	j	ra
	nop
END(MachConfigCache)

/*----------------------------------------------------------------------------
 *
 * SizeCache --
 *
 *	Get the size of the cache.
 *	Caller must have selected (swapped) the cache to be sized; runs
 *	with the caches isolated so the stores below never reach memory.
 *
 * Results:
 *	The size of the cache.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------------
 */
LEAF(SizeCache)
	mfc0	t0, MACH_COP_0_STATUS_REG	# Save the current status reg.
	nop
	or	v0, t0, MACH_SR_ISOL_CACHES	# Isolate the caches.
	nop					# Make sure no stores in pipe
	mtc0	v0, MACH_COP_0_STATUS_REG
	nop					# Make sure isolated
	nop
	nop
/*
 * Clear cache size boundaries.
 */
	li	v0, MACH_MIN_CACHE_SIZE
	li	v1, MACH_CACHED_MEMORY_ADDR
	li	t2, MACH_MAX_CACHE_SIZE
1:
	addu	t1, v0, v1			# Compute address to clear
	sw	zero, 0(t1)			# Clear cache memory
	bne	v0, t2, 1b
	sll	v0, v0, 1			# (delay slot) next power of two

	li	v0, -1
	sw	v0, 0(v1)			# Store marker in cache
	li	v0, MACH_MIN_CACHE_SIZE
2:
	addu	t1, v0, v1			# Compute address
	lw	t3, 0(t1)			# Look for marker
	nop					# load delay
	bne	t3, zero, 3f			# Found marker.
	nop
	bne	v0, t2, 2b			# keep looking
	sll	v0, v0, 1			# (delay slot) cache size * 2

	move	v0, zero			# must be no cache
3:
	mtc0	t0, MACH_COP_0_STATUS_REG	# un-isolate the caches
	nop					# Make sure unisolated
	nop
	nop
	nop
	j	ra
	nop
END(SizeCache)

/*----------------------------------------------------------------------------
 *
 * MachFlushCache --
 *
 *	Flush the caches.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The contents of the caches is flushed.
 *
 *----------------------------------------------------------------------------
 */
LEAF(MachFlushCache)
	lw	t1, machInstCacheSize		# Must load before isolating
	lw	t2, machDataCacheSize		# Must load before isolating
	mfc0	t3, MACH_COP_0_STATUS_REG	# Save the status register.
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts.
	la	v0, 1f
	or	v0, MACH_UNCACHED_MEMORY_ADDR	# Run uncached.
	j	v0
	nop
/*
 * Flush the instruction cache.
 */
1:
	li	v0, MACH_SR_ISOL_CACHES | MACH_SR_SWAP_CACHES
	mtc0	v0, MACH_COP_0_STATUS_REG	# Isolate and swap caches.
	li	t0, MACH_UNCACHED_MEMORY_ADDR
	subu	t0, t0, t1			# t0 = start of flush range
	li	t1, MACH_UNCACHED_MEMORY_ADDR	# t1 = end of flush range
	la	v0, 1f				# Run cached
	j	v0
	nop
1:
	addu	t0, t0, 4
	bne	t0, t1, 1b
	sb	zero, -4(t0)			# (delay slot) invalidate one word

	la	v0, 1f
	or	v0, MACH_UNCACHED_MEMORY_ADDR
	j	v0				# Run uncached
	nop
/*
 * Flush the data cache.
 */
1:
	li	v0, MACH_SR_ISOL_CACHES
	mtc0	v0, MACH_COP_0_STATUS_REG	# Isolate and swap back caches
	li	t0, MACH_UNCACHED_MEMORY_ADDR
	subu	t0, t0, t2			# t0 = start; t1 still holds end
	la	v0, 1f
	j	v0				# Back to cached mode
	nop
1:
	addu	t0, t0, 4
	bne	t0, t1, 1b
	sb	zero, -4(t0)			# (delay slot) invalidate one word

	nop					# Insure isolated stores
	nop					#   out of pipe.
	nop
	nop
	mtc0	t3, MACH_COP_0_STATUS_REG	# Restore status reg.
	nop					# Insure cache unisolated.
	nop
	nop
	nop
	j	ra
	nop
END(MachFlushCache)

/*----------------------------------------------------------------------------
 *
 * MachFlushICache --
 *
 *	void MachFlushICache(addr, len)
 *		vm_offset_t addr, len;
 *
 *	Flush instruction cache for range of addr to addr + len - 1.
 *	The address can be any valid address so long as no TLB misses occur.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	The contents of the cache is flushed.
 *
 *----------------------------------------------------------------------------
 */
LEAF(MachFlushICache)
	mfc0	t0, MACH_COP_0_STATUS_REG	# Save SR
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts.

	la	v1, 1f
	or	v1, MACH_UNCACHED_MEMORY_ADDR	# Run uncached.
	j	v1
	nop
1:
	bc0f	1b				# make sure stores are complete
	li	v1, MACH_SR_ISOL_CACHES | MACH_SR_SWAP_CACHES
	mtc0	v1, MACH_COP_0_STATUS_REG	# isolate + swap so stores hit i-cache
	nop
	addu	a1, a1, a0			# compute ending address
1:
	addu	a0, a0, 4
	bne	a0, a1, 1b
	sb	zero, -4(a0)			# (delay slot) invalidate one word

	mtc0	t0, MACH_COP_0_STATUS_REG	# enable interrupts
	j	ra				# return and run cached
	nop
END(MachFlushICache)

/*----------------------------------------------------------------------------
 *
 * MachFlushDCache --
 *
 *	void MachFlushDCache(addr, len)
 *		vm_offset_t addr, len;
 *
 *	Flush data cache for range of addr to addr + len - 1.
 *	The address can be any valid address so long as no TLB misses occur.
 *	(Be sure to use cached K0SEG kernel addresses)
 * Results:
 *	None.
 *
 * Side effects:
 *	The contents of the cache is flushed.
 *
 *----------------------------------------------------------------------------
 */
LEAF(MachFlushDCache)
	mfc0	t0, MACH_COP_0_STATUS_REG	# Save SR
	mtc0	zero, MACH_COP_0_STATUS_REG	# Disable interrupts.

	la	v1, 1f
	or	v1, MACH_UNCACHED_MEMORY_ADDR	# Run uncached.
	j	v1
	nop
1:
	bc0f	1b				# make sure stores are complete
	li	v1, MACH_SR_ISOL_CACHES		# no swap: stores hit d-cache
	mtc0	v1, MACH_COP_0_STATUS_REG
	nop
	addu	a1, a1, a0			# compute ending address
1:
	addu	a0, a0, 4
	bne	a0, a1, 1b
	sb	zero, -4(a0)			# (delay slot) invalidate one word

	mtc0	t0, MACH_COP_0_STATUS_REG	# enable interrupts
	j	ra				# return and run cached
	nop
END(MachFlushDCache)

#ifdef KADB
/*
 * Read a long and return it.
 * Note: addresses can be unaligned!
 *
 * long
L* kdbpeek(addr)
L* caddr_t addr;
L* {
L* return (*(long *)addr);
L* }
 */
LEAF(kdbpeek)
	li	v0, KADBERR			# arm fault handler: a trap while
	sw	v0, UADDR+U_PCB_ONFAULT		#   reading resumes at kadberr
	and	v0, a0, 3			# unaligned address?
	bne	v0, zero, 1f
	nop
	b	2f
	lw	v0, (a0)			# (delay slot) aligned access
1:
	lwr	v0, 0(a0)			# get next 4 bytes (unaligned)
	lwl	v0, 3(a0)
2:
	j	ra				# made it w/o errors
	sw	zero, UADDR+U_PCB_ONFAULT	# (delay slot) disarm fault handler
kadberr:
	li	v0, 1				# trap sends us here
	sw	v0, kdbmkfault
	j	ra
	nop
END(kdbpeek)

/*
 * Write a long to 'addr'.
 * Note: addresses can be unaligned!
 *
L* void
L* kdbpoke(addr, value)
L* caddr_t addr;
L* long value;
L* {
L* *(long *)addr = value;
L* }
 */
LEAF(kdbpoke)
	li	v0, KADBERR			# arm fault handler (see kdbpeek)
	sw	v0, UADDR+U_PCB_ONFAULT
	and	v0, a0, 3			# unaligned address?
	bne	v0, zero, 1f
	nop
	b	2f
	sw	a1, (a0)			# (delay slot) aligned access
1:
	swr	a1, 0(a0)			# store next 4 bytes (unaligned)
	swl	a1, 3(a0)
	and	a0, a0, ~3			# align address for cache flush
2:
	sw	zero, UADDR+U_PCB_ONFAULT	# disarm fault handler
	b	MachFlushICache			# flush instruction cache
	li	a1, 8				# (delay slot) flush length in bytes
END(kdbpoke)

/*
 * Save registers and state so we can do a 'kdbreset' (like longjmp) later.
 * Always returns zero.
 *
L* int kdb_savearea[11];
L*
L* int
L* kdbsetexit()
L* {
L* kdb_savearea[0] = 0;
L* return (0);
L* }
 */
	.comm	kdb_savearea, (11 * 4)

/* Save the callee-saved registers, sp, and ra (setjmp-style). */
LEAF(kdbsetexit)
	la	a0, kdb_savearea
	sw	s0, 0(a0)
	sw	s1, 4(a0)
	sw	s2, 8(a0)
	sw	s3, 12(a0)
	sw	s4, 16(a0)
	sw	s5, 20(a0)
	sw	s6, 24(a0)
	sw	s7, 28(a0)
	sw	sp, 32(a0)
	sw	s8, 36(a0)
	sw	ra, 40(a0)
	j	ra
	move	v0, zero			# (delay slot) always return 0
END(kdbsetexit)

/*
 * Restore registers and state (like longjmp) and return x.
 *
L* int
L* kdbreset(x)
L* {
L* return (x);
L* }
 */
LEAF(kdbreset)
	la	v0, kdb_savearea
	lw	ra, 40(v0)			# resume at kdbsetexit's caller
	lw	s0, 0(v0)
	lw	s1, 4(v0)
	lw	s2, 8(v0)
	lw	s3, 12(v0)
	lw	s4, 16(v0)
	lw	s5, 20(v0)
	lw	s6, 24(v0)
	lw	s7, 28(v0)
	lw	sp, 32(v0)
	lw	s8, 36(v0)
	j	ra
	move	v0, a0				# (delay slot) return x
END(kdbreset)

/*
 * Trap into the debugger.
 *
L* void
L* kdbpanic()
L* {
L* }
 */
LEAF(kdbpanic)
	break	MACH_BREAK_KDB_VAL		# breakpoint trap into kadb
	j	ra
	nop
END(kdbpanic)
#endif /* KADB */

#ifdef DEBUG
/* Store sp, ra, and s8 (frame pointer) into the array at a0. */
LEAF(cpu_getregs)
	sw	sp, 0(a0)
	sw	ra, 4(a0)
	j	ra
	sw	s8, 8(a0)			# (delay slot)
END(cpu_getregs)
#endif /* DEBUG */

/*
 * Interrupt counters for vmstat.
 * XXX These aren't used yet.
 */
	.data
	.globl	intrcnt, eintrcnt, intrnames, eintrnames
intrnames:
	.asciiz	"spur"
	.asciiz	"hil"
	.asciiz	"lev2"
	.asciiz	"lev3"
	.asciiz	"lev4"
	.asciiz	"lev5"
	.asciiz	"dma"
	.asciiz	"clock"
	.asciiz	"statclock"
	.asciiz	"nmi"
eintrnames:
	.align	2
intrcnt:
	.word	0,0,0,0,0,0,0,0,0,0
eintrcnt: