1/* 2 * Copyright (c) 1988 University of Utah. 3 * Copyright (c) 1980, 1990 The Regents of the University of California. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to Berkeley by 7 * the Systems Programming Group of the University of Utah Computer 8 * Science Department. 9 * 10 * %sccs.include.redist.c% 11 * 12 * from: Utah $Hdr: locore.s 1.58 91/04/22$ 13 * 14 * @(#)locore.s 7.11 (Berkeley) 05/09/91 15 */ 16 17#include "assym.s" 18#include "vectors.s" 19 20#define MMUADDR(ar) movl _MMUbase,ar 21#define CLKADDR(ar) movl _CLKbase,ar 22 23 .text 24/* 25 * This is where we wind up if the kernel jumps to location 0. 26 * (i.e. a bogus PC) This is known to immediately follow the vector 27 * table and is hence at 0x400 (see reset vector in vectors.s). 28 */ 29 .globl _panic 30 pea Ljmp0panic 31 jbsr _panic 32 /* NOTREACHED */ 33Ljmp0panic: 34 .asciz "kernel jump to zero" 35 .even 36 37/* 38 * Do a dump. 39 * Called by auto-restart. 40 */ 41 .globl _dumpsys 42 .globl _doadump 43_doadump: 44 jbsr _dumpsys 45 jbsr _doboot 46 /*NOTREACHED*/ 47 48/* 49 * Trap/interrupt vector routines 50 */ 51 52 .globl _trap, _nofault, _longjmp 53_buserr: 54 tstl _nofault | device probe? 55 jeq _addrerr | no, handle as usual 56 movl _nofault,sp@- | yes, 57 jbsr _longjmp | longjmp(nofault) 58_addrerr: 59 clrw sp@- | pad SR to longword 60 moveml #0xFFFF,sp@- | save user registers 61 movl usp,a0 | save the user SP 62 movl a0,sp@(60) | in the savearea 63 lea sp@(64),a1 | grab base of HW berr frame 64 moveq #0,d0 65 movw a1@(12),d0 | grab SSW for fault processing 66 btst #12,d0 | RB set? 67 jeq LbeX0 | no, test RC 68 bset #14,d0 | yes, must set FB 69 movw d0,a1@(12) | for hardware too 70LbeX0: 71 btst #13,d0 | RC set? 72 jeq LbeX1 | no, skip 73 bset #15,d0 | yes, must set FC 74 movw d0,a1@(12) | for hardware too 75LbeX1: 76 btst #8,d0 | data fault? 77 jeq Lbe0 | no, check for hard cases 78 movl a1@(18),d1 | fault address is as given in frame 79 jra Lbe10 | thats it 80Lbe0: 81 btst #4,a1@(8) | long (type B) stack frame? 82 jne Lbe4 | yes, go handle 83 movl a1@(4),d1 | no, can use save PC 84 btst #14,d0 | FB set? 85 jeq Lbe3 | no, try FC 86 addql #4,d1 | yes, adjust address 87 jra Lbe10 | done 88Lbe3: 89 btst #15,d0 | FC set? 90 jeq Lbe10 | no, done 91 addql #2,d1 | yes, adjust address 92 jra Lbe10 | done 93Lbe4: 94 movl a1@(38),d1 | long format, use stage B address 95 btst #15,d0 | FC set? 96 jeq Lbe10 | no, all done 97 subql #2,d1 | yes, adjust address 98Lbe10: 99 movl d1,sp@- | push fault VA 100 movl d0,sp@- | and padded SSW 101 movw a1@(8),d0 | get frame format/vector offset 102 andw #0x0FFF,d0 | clear out frame format 103 cmpw #12,d0 | address error vector? 104 jeq Lisaerr | yes, go to it 105#if defined(HP330) || defined(HP360) || defined(HP370) 106 tstl _mmutype | HP MMU? 107 jeq Lbehpmmu | yes, skip 108 movl d1,a0 | fault address 109 ptestr #1,a0@,#7 | do a table search 110 pmove psr,sp@ | save result 111 btst #7,sp@ | bus error bit set? 112 jeq Lismerr | no, must be MMU fault 113 clrw sp@ | yes, re-clear pad word 114 jra Lisberr | and process as normal bus error 115Lbehpmmu: 116#endif 117#if defined(HP320) || defined(HP350) 118 MMUADDR(a0) 119 movl a0@(MMUSTAT),d0 | read status 120 btst #3,d0 | MMU fault? 121 jeq Lisberr | no, just a non-MMU bus error so skip 122 andl #~MMU_FAULT,a0@(MMUSTAT)| yes, clear fault bits 123 movw d0,sp@ | pass MMU stat in upper half of code 124#endif 125Lismerr: 126 movl #T_MMUFLT,sp@- | show that we are an MMU fault 127 jra Ltrapnstkadj | and deal with it 128Lisaerr: 129 movl #T_ADDRERR,sp@- | mark address error 130 jra Ltrapnstkadj | and deal with it 131Lisberr: 132 movl #T_BUSERR,sp@- | mark bus error 133Ltrapnstkadj: 134 jbsr _trap | handle the error 135 lea sp@(12),sp | pop value args 136 movl sp@(60),a0 | restore user SP 137 movl a0,usp | from save area 138 movw sp@(64),d0 | need to adjust stack? 139 jne Lstkadj | yes, go to it 140 moveml sp@+,#0x7FFF | no, restore most user regs 141 addql #6,sp | toss SSP and pad 142 jra rei | all done 143Lstkadj: 144 lea sp@(66),a1 | pointer to HW frame 145 addql #8,a1 | source pointer 146 movl a1,a0 | source 147 addw d0,a0 | + hole size = dest pointer 148 movl a1@-,a0@- | copy 149 movl a1@-,a0@- | 8 bytes 150 movl a0,sp@(60) | new SSP 151 moveml sp@+,#0x7FFF | restore user registers 152 movl sp@,sp | and our SP 153 jra rei | all done 154 155/* 156 * FP exceptions. 157 */ 158_fpfline: 159 jra _illinst 160 161_fpunsupp: 162 jra _illinst 163 164/* 165 * Handles all other FP coprocessor exceptions. 166 * Note that since some FP exceptions generate mid-instruction frames 167 * and may cause signal delivery, we need to test for stack adjustment 168 * after the trap call. 169 */ 170_fpfault: 171#ifdef FPCOPROC 172 clrw sp@- | pad SR to longword 173 moveml #0xFFFF,sp@- | save user registers 174 movl usp,a0 | and save 175 movl a0,sp@(60) | the user stack pointer 176 clrl sp@- | no VA arg 177 movl _curpcb,a0 | current pcb 178 lea a0@(PCB_FPCTX),a0 | address of FP savearea 179 fsave a0@ | save state 180 tstb a0@ | null state frame? 181 jeq Lfptnull | yes, safe 182 clrw d0 | no, need to tweak BIU 183 movb a0@(1),d0 | get frame size 184 bset #3,a0@(0,d0:w) | set exc_pend bit of BIU 185Lfptnull: 186 fmovem fpsr,sp@- | push fpsr as code argument 187 frestore a0@ | restore state 188 movl #T_FPERR,sp@- | push type arg 189 jra Ltrapnstkadj | call trap and deal with stack cleanup 190#else 191 jra _badtrap | treat as an unexpected trap 192#endif 193 194/* 195 * Coprocessor and format errors can generate mid-instruction stack 196 * frames and cause signal delivery hence we need to check for potential 197 * stack adjustment. 198 */ 199_coperr: 200 clrw sp@- 201 moveml #0xFFFF,sp@- 202 movl usp,a0 | get and save 203 movl a0,sp@(60) | the user stack pointer 204 clrl sp@- | no VA arg 205 clrl sp@- | or code arg 206 movl #T_COPERR,sp@- | push trap type 207 jra Ltrapnstkadj | call trap and deal with stack adjustments 208 209_fmterr: 210 clrw sp@- 211 moveml #0xFFFF,sp@- 212 movl usp,a0 | get and save 213 movl a0,sp@(60) | the user stack pointer 214 clrl sp@- | no VA arg 215 clrl sp@- | or code arg 216 movl #T_FMTERR,sp@- | push trap type 217 jra Ltrapnstkadj | call trap and deal with stack adjustments 218 219/* 220 * Other exceptions only cause four and six word stack frame and require 221 * no post-trap stack adjustment. 222 */ 223_illinst: 224 clrw sp@- 225 moveml #0xFFFF,sp@- 226 moveq #T_ILLINST,d0 227 jra fault 228 229_zerodiv: 230 clrw sp@- 231 moveml #0xFFFF,sp@- 232 moveq #T_ZERODIV,d0 233 jra fault 234 235_chkinst: 236 clrw sp@- 237 moveml #0xFFFF,sp@- 238 moveq #T_CHKINST,d0 239 jra fault 240 241_trapvinst: 242 clrw sp@- 243 moveml #0xFFFF,sp@- 244 moveq #T_TRAPVINST,d0 245 jra fault 246 247_privinst: 248 clrw sp@- 249 moveml #0xFFFF,sp@- 250 moveq #T_PRIVINST,d0 251 jra fault 252 253 .globl fault 254fault: 255 movl usp,a0 | get and save 256 movl a0,sp@(60) | the user stack pointer 257 clrl sp@- | no VA arg 258 clrl sp@- | or code arg 259 movl d0,sp@- | push trap type 260 jbsr _trap | handle trap 261 lea sp@(12),sp | pop value args 262 movl sp@(60),a0 | restore 263 movl a0,usp | user SP 264 moveml sp@+,#0x7FFF | restore most user regs 265 addql #6,sp | pop SP and pad word 266 jra rei | all done 267 268 .globl _straytrap 269_badtrap: 270 clrw sp@- | pad SR 271 moveml #0xC0C0,sp@- | save scratch regs 272 movw sp@(24),sp@- | push exception vector info 273 clrw sp@- 274 movl sp@(24),sp@- | and PC 275 jbsr _straytrap | report 276 addql #8,sp | pop args 277 moveml sp@+,#0x0303 | restore regs 278 addql #2,sp | pop padding 279 jra rei | all done 280 281 .globl _syscall 282_trap0: 283 clrw sp@- | pad SR to longword 284 moveml #0xFFFF,sp@- | save user registers 285 movl usp,a0 | save the user SP 286 movl a0,sp@(60) | in the savearea 287 movl d0,sp@- | push syscall number 288 jbsr _syscall | handle it 289 addql #4,sp | pop syscall arg 290 movl sp@(60),a0 | grab and restore 291 movl a0,usp | user SP 292 moveml sp@+,#0x7FFF | restore most registers 293 addql #6,sp | pop SSP and align word 294 jra rei | all done 295 296/* 297 * Routines for traps 1 and 2. The meaning of the two traps depends 298 * on whether we are an HPUX compatible process or a native 4.3 process. 299 * Our native 4.3 implementation uses trap 1 as sigreturn() and trap 2 300 * as a breakpoint trap. HPUX uses trap 1 for a breakpoint, so we have 301 * to make adjustments so that trap 2 is used for sigreturn. 302 */ 303_trap1: 304 btst #PCB_TRCB,pcbflag | being traced by an HPUX process? 305 jeq sigreturn | no, trap1 is sigreturn 306 jra _trace | yes, trap1 is breakpoint 307 308_trap2: 309 btst #PCB_TRCB,pcbflag | being traced by an HPUX process? 310 jeq _trace | no, trap2 is breakpoint 311 jra sigreturn | yes, trap2 is sigreturn 312 313/* 314 * Trap 12 is the entry point for the cachectl "syscall" (both HPUX & BSD) 315 * cachectl(command, addr, length) 316 * command in d0, addr in a1, length in d1 317 */ 318 .globl _cachectl 319_trap12: 320 movl d1,sp@- | push length 321 movl a1,sp@- | push addr 322 movl d0,sp@- | push command 323 jbsr _cachectl | do it 324 lea sp@(12),sp | pop args 325 jra rei | all done 326 327/* 328 * Trap 15 is used for: 329 * - KGDB traps 330 * - trace traps for SUN binaries (not fully supported yet) 331 * We just pass it on and let trap() sort it all out 332 */ 333_trap15: 334 clrw sp@- 335 moveml #0xFFFF,sp@- 336#ifdef KGDB 337 moveq #T_TRAP15,d0 338 movl sp@(64),d1 | from user mode? 339 andl #PSL_S,d1 340 jeq fault 341 movl d0,sp@- 342 .globl _kgdb_trap_glue 343 jbsr _kgdb_trap_glue | returns if no debugger 344 addl #4,sp 345#endif 346 moveq #T_TRAP15,d0 347 jra fault 348 349/* 350 * Hit a breakpoint (trap 1 or 2) instruction. 351 * Push the code and treat as a normal fault. 352 */ 353_trace: 354 clrw sp@- 355 moveml #0xFFFF,sp@- 356#ifdef KGDB 357 moveq #T_TRACE,d0 358 movl sp@(64),d1 | from user mode? 359 andl #PSL_S,d1 360 jeq fault 361 movl d0,sp@- 362 jbsr _kgdb_trap_glue | returns if no debugger 363 addl #4,sp 364#endif 365 moveq #T_TRACE,d0 366 jra fault 367 368/* 369 * The sigreturn() syscall comes here. It requires special handling 370 * because we must open a hole in the stack to fill in the (possibly much 371 * larger) original stack frame. 372 */ 373sigreturn: 374 lea sp@(-84),sp | leave enough space for largest frame 375 movl sp@(84),sp@ | move up current 8 byte frame 376 movl sp@(88),sp@(4) 377 movw #84,sp@- | default: adjust by 84 bytes 378 moveml #0xFFFF,sp@- | save user registers 379 movl usp,a0 | save the user SP 380 movl a0,sp@(60) | in the savearea 381 movl #SYS_sigreturn,sp@- | push syscall number 382 jbsr _syscall | handle it 383 addql #4,sp | pop syscall# 384 movl sp@(60),a0 | grab and restore 385 movl a0,usp | user SP 386 lea sp@(64),a1 | pointer to HW frame 387 movw a1@+,d0 | do we need to adjust the stack? 388 jeq Lsigr1 | no, just continue 389 moveq #92,d1 | total size 390 subw d0,d1 | - hole size = frame size 391 lea a1@(92),a0 | destination 392 addw d1,a1 | source 393 lsrw #1,d1 | convert to word count 394 subqw #1,d1 | minus 1 for dbf 395Lsigrlp: 396 movw a1@-,a0@- | copy a word 397 dbf d1,Lsigrlp | continue 398 movl a0,a1 | new HW frame base 399Lsigr1: 400 movl a1,sp@(60) | new SP value 401 moveml sp@+,#0x7FFF | restore user registers 402 movl sp@,sp | and our SP 403 jra rei | all done 404 405/* 406 * Interrupt handlers. 407 * All DIO device interrupts are auto-vectored. Most can be configured 408 * to interrupt in the range IPL3 to IPL5. Here are our assignments: 409 * 410 * Level 0: Spurious: ignored. 411 * Level 1: HIL 412 * Level 2: 413 * Level 3: Internal HP-IB 414 * Level 4: "Fast" HP-IBs, SCSI 415 * Level 5: DMA, Ethernet, Built-in RS232 416 * Level 6: Clock 417 * Level 7: Non-maskable: parity errors, RESET key 418 */ 419 .globl _hilint, _intrhand, _hardclock, _nmihand 420 421_spurintr: 422 addql #1,_intrcnt+0 423 addql #1,_cnt+V_INTR 424 jra rei 425 426_lev1intr: 427 addql #1,_intrcnt+4 428 clrw sp@- 429 moveml #0xC0C0,sp@- 430 jbsr _hilint 431 moveml sp@+,#0x0303 432 addql #2,sp 433 addql #1,_cnt+V_INTR 434 jra rei 435 436/* check for DMA first to reduce overhead */ 437_lev5intr: 438 clrw sp@- 439 moveml #0xC0C0,sp@- 440 jbsr _dmaintr 441 tstl d0 442 jeq Lnotdma 443 addql #1,_intrcnt+24 444 moveml sp@+,#0x0303 445 addql #2,sp 446 addql #1,_cnt+V_INTR 447 jra rei 448 449_lev2intr: 450_lev3intr: 451_lev4intr: 452 clrw sp@- 453 moveml #0xC0C0,sp@- 454Lnotdma: 455 lea _intrcnt,a0 456 movw sp@(24),d0 | use vector offset 457 andw #0xfff,d0 | sans frame type 458 addql #1,a0@(-0x60,d0:w) | to increment apropos counter 459 movw sr,sp@- | push current SR value 460 clrw sp@- | padded to longword 461 jbsr _intrhand | handle interrupt 462 addql #4,sp | pop SR 463 moveml sp@+,#0x0303 464 addql #2,sp 465 addql #1,_cnt+V_INTR 466 jra rei 467 468_lev6intr: 469 clrw sp@- 470 moveml #0xC0C0,sp@- 471#ifdef DEBUG 472 .globl _panicstr, _regdump, _panic 473 tstl timebomb | set to go off? 474 jeq Lnobomb | no, skip it 475 subql #1,timebomb | decrement 476 jne Lnobomb | not ready to go off 477 moveml sp@+,#0x0303 | temporarily restore regs 478 jra Lbomb | go die 479Lnobomb: 480 cmpl #_kstack+NBPG,sp | are we still in stack pages? 481 jcc Lstackok | yes, continue normally 482 tstl _curproc | if !curproc could have swtch_exit'ed, 483 jeq Lstackok | might be on tmpstk 484 tstl _panicstr | have we paniced? 485 jne Lstackok | yes, do not re-panic 486 lea tmpstk,sp | no, switch to tmpstk 487 moveml #0xFFFF,sp@- | push all registers 488 movl #Lstkrip,sp@- | push panic message 489 jbsr _printf | preview 490 addql #4,sp 491 movl sp,a0 | remember this spot 492 movl #256,sp@- | longword count 493 movl a0,sp@- | and reg pointer 494 jbsr _regdump | dump core 495 addql #8,sp | pop params 496 movl #Lstkrip,sp@- | push panic message 497 jbsr _panic | ES and D 498Lbomb: 499 moveml #0xFFFF,sp@- | push all registers 500 movl sp,a0 | remember this spot 501 movl #256,sp@- | longword count 502 movl a0,sp@- | and reg pointer 503 jbsr _regdump | dump core 504 addql #8,sp | pop params 505 movl #Lbomrip,sp@- | push panic message 506 jbsr _panic | ES and D 507Lstkrip: 508 .asciz "k-stack overflow" 509Lbomrip: 510 .asciz "timebomb" 511 .even 512Lstackok: 513#endif 514 CLKADDR(a0) 515 movb a0@(CLKSR),d0 | read clock status 516#ifdef PROFTIMER 517 .globl _profon 518 tstb _profon | profile clock on? 519 jeq Ltimer1 | no, then must be timer1 interrupt 520 btst #2,d0 | timer3 interrupt? 521 jeq Ltimer1 | no, must be timer1 522 movb a0@(CLKMSB3),d1 | clear timer3 interrupt 523 lea sp@(16),a1 | get pointer to PS 524#ifdef GPROF 525 .globl _profclock 526 movl d0,sp@- | save status so jsr will not clobber 527 movl a1@,sp@- | push padded PS 528 movl a1@(4),sp@- | push PC 529 jbsr _profclock | profclock(pc, ps) 530 addql #8,sp | pop params 531#else 532 btst #5,a1@(2) | saved PS in user mode? 533 jne Lttimer1 | no, go check timer1 534 movl _curpcb,a0 | current pcb 535 tstl a0@(U_PROFSCALE) | process being profiled? 536 jeq Lttimer1 | no, go check timer1 537 movl d0,sp@- | save status so jsr will not clobber 538 movl #1,sp@- 539 pea a0@(U_PROF) 540 movl a1@(4),sp@- 541 jbsr _addupc | addupc(pc, &u.u_prof, 1) 542 lea sp@(12),sp | pop params 543#endif 544 addql #1,_intrcnt+32 | add another profile clock interrupt 545 movl sp@+,d0 | get saved clock status 546 CLKADDR(a0) 547Lttimer1: 548 btst #0,d0 | timer1 interrupt? 549 jeq Ltimend | no, check state of kernel profiling 550Ltimer1: 551#endif 552 movb a0@(CLKMSB1),d1 | clear timer1 interrupt 553 lea sp@(16),a1 | get pointer to PS 554 movl a1@,sp@- | push padded PS 555 movl a1@(4),sp@- | push PC 556 jbsr _hardclock | call generic clock int routine 557 addql #8,sp | pop params 558 addql #1,_intrcnt+28 | add another system clock interrupt 559#ifdef PROFTIMER 560Ltimend: 561#ifdef GPROF 562 .globl _profiling, _startprofclock 563 tstl _profiling | kernel profiling desired? 564 jne Ltimdone | no, all done 565 bset #7,_profon | mark continuous timing 566 jne Ltimdone | was already enabled, all done 567 jbsr _startprofclock | else turn it on 568Ltimdone: 569#endif 570#endif 571 moveml sp@+,#0x0303 | restore scratch regs 572 addql #2,sp | pop pad word 573 addql #1,_cnt+V_INTR | chalk up another interrupt 574 jra rei | all done 575 576_lev7intr: 577#ifdef PROFTIMER 578 addql #1,_intrcnt+36 579#else 580 addql #1,_intrcnt+32 581#endif 582 clrw sp@- | pad SR to longword 583 moveml #0xFFFF,sp@- | save registers 584 movl usp,a0 | and save 585 movl a0,sp@(60) | the user stack pointer 586 jbsr _nmihand | call handler 587 movl sp@(60),a0 | restore 588 movl a0,usp | user SP 589 moveml sp@+,#0x7FFF | and remaining registers 590 addql #6,sp | pop SSP and align word 591 jra rei | all done 592 593/* 594 * Emulation of VAX REI instruction. 595 * 596 * This code deals with checking for and servicing ASTs 597 * (profiling, scheduling) and software interrupts (network, softclock). 598 * We check for ASTs first, just like the VAX. To avoid excess overhead 599 * the T_ASTFLT handling code will also check for software interrupts so we 600 * do not have to do it here. 601 * 602 * This code is complicated by the fact that sendsig may have been called 603 * necessitating a stack cleanup. A cleanup should only be needed at this 604 * point for coprocessor mid-instruction frames (type 9), but we also test 605 * for bus error frames (type 10 and 11). 606 */ 607 .comm _ssir,1 608 .globl _astpending 609rei: 610#ifdef DEBUG 611 tstl _panicstr | have we paniced? 612 jne Ldorte | yes, do not make matters worse 613#endif 614 tstl _astpending | AST pending? 615 jeq Lchksir | no, go check for SIR 616 btst #5,sp@ | yes, are we returning to user mode? 617 jne Lchksir | no, go check for SIR 618 clrw sp@- | pad SR to longword 619 moveml #0xFFFF,sp@- | save all registers 620 movl usp,a1 | including 621 movl a1,sp@(60) | the users SP 622 clrl sp@- | VA == none 623 clrl sp@- | code == none 624 movl #T_ASTFLT,sp@- | type == async system trap 625 jbsr _trap | go handle it 626 lea sp@(12),sp | pop value args 627 movl sp@(60),a0 | restore 628 movl a0,usp | user SP 629 moveml sp@+,#0x7FFF | and all remaining registers 630 addql #4,sp | toss SSP 631 tstw sp@+ | do we need to clean up stack? 632 jeq Ldorte | no, just continue 633 btst #7,sp@(6) | type 9/10/11 frame? 634 jeq Ldorte | no, nothing to do 635 btst #5,sp@(6) | type 9? 636 jne Last1 | no, skip 637 movw sp@,sp@(12) | yes, push down SR 638 movl sp@(2),sp@(14) | and PC 639 clrw sp@(18) | and mark as type 0 frame 640 lea sp@(12),sp | clean the excess 641 jra Ldorte | all done 642Last1: 643 btst #4,sp@(6) | type 10? 644 jne Last2 | no, skip 645 movw sp@,sp@(24) | yes, push down SR 646 movl sp@(2),sp@(26) | and PC 647 clrw sp@(30) | and mark as type 0 frame 648 lea sp@(24),sp | clean the excess 649 jra Ldorte | all done 650Last2: 651 movw sp@,sp@(84) | type 11, push down SR 652 movl sp@(2),sp@(86) | and PC 653 clrw sp@(90) | and mark as type 0 frame 654 lea sp@(84),sp | clean the excess 655 jra Ldorte | all done 656Lchksir: 657 tstb _ssir | SIR pending? 658 jeq Ldorte | no, all done 659 movl d0,sp@- | need a scratch register 660 movw sp@(4),d0 | get SR 661 andw #PSL_IPL7,d0 | mask all but IPL 662 jne Lnosir | came from interrupt, no can do 663 movl sp@+,d0 | restore scratch register 664Lgotsir: 665 movw #SPL1,sr | prevent others from servicing int 666 tstb _ssir | too late? 667 jeq Ldorte | yes, oh well... 668 clrw sp@- | pad SR to longword 669 moveml #0xFFFF,sp@- | save all registers 670 movl usp,a1 | including 671 movl a1,sp@(60) | the users SP 672 clrl sp@- | VA == none 673 clrl sp@- | code == none 674 movl #T_SSIR,sp@- | type == software interrupt 675 jbsr _trap | go handle it 676 lea sp@(12),sp | pop value args 677 movl sp@(60),a0 | restore 678 movl a0,usp | user SP 679 moveml sp@+,#0x7FFF | and all remaining registers 680 addql #6,sp | pop SSP and align word 681 rte 682Lnosir: 683 movl sp@+,d0 | restore scratch register 684Ldorte: 685 rte | real return 686 687/* 688 * Kernel access to the current processes kernel stack is via a fixed 689 * virtual address. It is at the same address as in the users VA space. 690 * Umap contains the KVA of the first of UPAGES PTEs mapping VA _kstack. 691 */ 692 .data 693 .set _kstack,USRSTACK 694_Umap: .long 0 695 .globl _kstack, _Umap 696 697#define RELOC(var, ar) \ 698 lea var,ar; \ 699 addl a5,ar 700 701/* 702 * Initialization 703 * 704 * A5 contains physical load point from boot 705 * VBR contains zero from ROM. Exceptions will continue to vector 706 * through ROM until MMU is turned on at which time they will vector 707 * through our table (vectors.s). 708 */ 709 .comm _lowram,4 710 711 .text 712 .globl _edata 713 .globl _etext,_end 714 .globl start 715start: 716 movw #PSL_HIGHIPL,sr | no interrupts 717 RELOC(tmpstk, a0) 718 movl a0,sp | give ourselves a temporary stack 719 RELOC(_lowram, a0) 720 movl a5,a0@ | store start of physical memory 721 movl #CACHE_OFF,d0 722 movc d0,cacr | clear and disable on-chip cache(s) 723 724/* determine our CPU/MMU combo - check for all regardless of kernel config */ 725 movl #INTIOBASE+MMUBASE,a1 726 movl #0x200,d0 | data freeze bit 727 movc d0,cacr | only exists on 68030 728 movc cacr,d0 | read it back 729 tstl d0 | zero? 730 jeq Lis68020 | yes, we have 68020 731 RELOC(_mmutype, a0) | no, we have 68030 732 movl #-1,a0@ | set to reflect 68030 PMMU 733 RELOC(_machineid, a0) 734 movl #0x80,a1@(MMUCMD) | set magic cookie 735 movl a1@(MMUCMD),d0 | read it back 736 btst #7,d0 | cookie still on? 737 jeq Lnot370 | no, 360 or 375 738 movl #0,a1@(MMUCMD) | clear magic cookie 739 movl a1@(MMUCMD),d0 | read it back 740 btst #7,d0 | still on? 741 jeq Lisa370 | no, must be a 370 742 movl #5,a0@ | yes, must be a 340 743 jra Lstart1 744Lnot370: 745 movl #3,a0@ | type is at least a 360 746 movl #0,a1@(MMUCMD) | clear magic cookie2 747 movl a1@(MMUCMD),d0 | read it back 748 btst #16,d0 | still on? 749 jeq Lstart1 | no, must be a 360 750 movl #6,a0@ | yes, must be a 345/375 751 jra Lhaspac 752Lisa370: 753 movl #4,a0@ | set to 370 754Lhaspac: 755 RELOC(_ectype, a0) 756 movl #-1,a0@ | also has a physical address cache 757 jra Lstart1 758Lis68020: 759 movl #1,a1@(MMUCMD) | a 68020, write HP MMU location 760 movl a1@(MMUCMD),d0 | read it back 761 btst #0,d0 | non-zero? 762 jne Lishpmmu | yes, we have HP MMU 763 RELOC(_mmutype, a0) 764 movl #1,a0@ | no, we have PMMU 765 RELOC(_machineid, a0) 766 movl #1,a0@ | and 330 CPU 767 jra Lstart1 768Lishpmmu: 769 RELOC(_ectype, a0) | 320 or 350 770 movl #1,a0@ | both have a virtual address cache 771 movl #0x80,a1@(MMUCMD) | set magic cookie 772 movl a1@(MMUCMD),d0 | read it back 773 btst #7,d0 | cookie still on? 774 jeq Lstart1 | no, just a 320 775 RELOC(_machineid, a0) 776 movl #2,a0@ | yes, a 350 777 778Lstart1: 779 movl #0,a1@(MMUCMD) | clear out MMU again 780/* initialize source/destination control registers for movs */ 781 moveq #FC_USERD,d0 | user space 782 movc d0,sfc | as source 783 movc d0,dfc | and destination of transfers 784 785/* 786 * Allocate kernel segment/page table resources. 787 * a5 contains the PA of lowest RAM page 788 * a4 contains the PA of first available page at any time 789 * d5 contains the VA of first available page at any time 790 * (since we assume a zero load point, it is also the size of 791 * allocated space at any time) 792 * We assume (i.e. do not check) that the initial page table size 793 * (Sysptsize) is big enough to map everything we allocate here. 794 * 795 * We allocate the IO maps here since the 320/350 MMU registers are 796 * mapped in this range and it would be nice to be able to access them 797 * after the MMU is turned on. 798 */ 799 .globl _Sysseg, _Sysmap, _Sysptmap, _Sysptsize 800 movl #_end,d5 | end of static kernel text/data 801 addl #NBPG-1,d5 802 andl #PG_FRAME,d5 | round to a page 803 movl d5,a4 804 addl a5,a4 805/* allocate kernel segment table */ 806 RELOC(_Sysseg, a0) 807 movl d5,a0@ | remember VA for pmap module 808 movl a4,sp@- | remember PA for loading MMU 809 addl #NBPG,a4 810 addl #NBPG,d5 811/* allocate initial page table pages (including internal IO map) */ 812 RELOC(_Sysptsize, a0) 813 movl a0@,d0 | initial system PT size (pages) 814 addl #(IIOMAPSIZE+EIOMAPSIZE+NPTEPG-1)/NPTEPG,d0 815 | add pages for IO maps 816 movl #PGSHIFT,d1 817 lsll d1,d0 | convert to bytes 818 movl a4,sp@- | remember PA for ST load 819 addl d0,a4 820 addl d0,d5 821/* allocate kernel page table map */ 822 RELOC(_Sysptmap, a0) 823 movl d5,a0@ | remember VA for pmap module 824 movl a4,sp@- | remember PA for PT map load 825 addl #NBPG,a4 826 addl #NBPG,d5 827/* compute KVA of Sysptmap; mapped after page table pages */ 828 movl d0,d2 | remember PT size (bytes) 829 moveq #SG_ISHIFT-PGSHIFT,d1 830 lsll d1,d0 | page table size serves as seg index 831 RELOC(_Sysmap, a0) 832 movl d0,a0@ | remember VA for pmap module 833/* initialize ST and PT map: PT pages + PT map */ 834 movl sp@+,a1 | PT map PA 835 movl sp@+,d4 | start of PT pages 836 movl sp@+,a0 | ST phys addr 837 lea a0@(NBPG-4),a2 | (almost) end of ST 838 movl d4,d3 839 orl #SG_RW+SG_V,d4 | create proto STE for ST 840 orl #PG_RW+PG_CI+PG_V,d3 | create proto PTE for PT map 841List1: 842 movl d4,a0@+ 843 movl d3,a1@+ 844 addl #NBPG,d4 845 addl #NBPG,d3 846 cmpl a4,d4 | sleezy, but works ok 847 jcs List1 848/* initialize ST and PT map: invalidate up to last entry */ 849List2: 850 movl #SG_NV,a0@+ 851 movl #PG_NV,a1@+ 852 cmpl a2,a0 853 jcs List2 854/* 855 * Portions of the last segment of KVA space (0xFFF00000 - 0xFFFFFFFF) 856 * are mapped for a couple of purposes. 0xFFF00000 for UPAGES is used 857 * for mapping the current process u-area (u + kernel stack). The 858 * very last page (0xFFFFF000) is mapped to the last physical page of 859 * RAM to give us a region in which PA == VA. We use this page for 860 * enabling/disabling mapping. 861 */ 862 movl a4,d1 | grab next available for PT page 863 andl #SG_FRAME,d1 | mask to frame number 864 orl #SG_RW+SG_V,d1 | RW and valid 865 movl d1,a0@+ | store in last ST entry 866 movl a0,a2 | remember addr for PT load 867 andl #PG_FRAME,d1 868 orl #PG_RW+PG_V,d1 | convert to PTE 869 movl d1,a1@+ | store in PT map 870 movl a4,a0 | physical beginning of PT page 871 lea a0@(NBPG-4),a1 | (almost) end of page 872Lispt7: 873 movl #PG_NV,a0@+ | invalidate 874 cmpl a1,a0 875 jcs Lispt7 876 movl #MAXADDR,d1 | get last phys page addr 877 andl #PG_FRAME,d1 878 orl #PG_RW+PG_V,d1 879 movl d1,a0@+ | map to last virt page 880 addl #NBPG,a4 881 addl #NBPG,d5 882/* record KVA at which to access current u-area PTEs */ 883 RELOC(_Sysmap, a0) 884 movl a0@,d0 | get system PT address 885 addl #NPTEPG*NBPG,d0 | end of system PT 886 subl #HIGHPAGES*4,d0 | back up to first PTE for u-area 887 RELOC(_Umap, a0) 888 movl d0,a0@ | remember location 889/* initialize page table pages */ 890 movl a2,a0 | end of ST is start of PT 891 addl d2,a2 | add size to get end of PT 892/* text pages are read-only */ 893 clrl d0 | assume load at VA 0 894 movl a5,d1 | get load PA 895 andl #PG_FRAME,d1 | convert to a page frame 896#ifdef KGDB 897 orl #PG_RW+PG_V,d1 | XXX: RW for now 898#else 899 orl #PG_RO+PG_V,d1 | create proto PTE 900#endif 901 movl #_etext,a1 | go til end of text 902Lipt1: 903 movl d1,a0@+ | load PTE 904 addl #NBPG,d1 | increment page frame number 905 addl #NBPG,d0 | and address counter 906 cmpl a1,d0 | done yet? 907 jcs Lipt1 | no, keep going 908/* data, bss and dynamic tables are read/write */ 909 andl #PG_FRAME,d1 | mask out old prot bits 910 orl #PG_RW+PG_V,d1 | mark as valid and RW 911 movl d5,a1 | go til end of data allocated so far 912 addl #(UPAGES+1)*NBPG,a1 | and proc0 PT/u-area (to be allocated) 913Lipt2: 914 movl d1,a0@+ | load PTE 915 addl #NBPG,d1 | increment page frame number 916 addl #NBPG,d0 | and address counter 917 cmpl a1,d0 | done yet? 918 jcs Lipt2 | no, keep going 919/* invalidate remainder of kernel PT */ 920 movl a2,a1 | end of PT 921Lipt3: 922 movl #PG_NV,a0@+ | invalidate PTE 923 cmpl a1,a0 | done yet? 924 jcs Lipt3 | no, keep going 925/* go back and validate internal IO PTEs at end of allocated PT space */ 926 movl a2,a0 | end of allocated PT space 927 subl #(IIOMAPSIZE+EIOMAPSIZE)*4,a0 | back up IOMAPSIZE PTEs 928 subl #EIOMAPSIZE*4,a2 | only initialize internal IO PTEs 929 movl #INTIOBASE,d1 | physical internal IO base 930 orl #PG_RW+PG_CI+PG_V,d1 | create proto PTE 931Lipt4: 932 movl d1,a0@+ | load PTE 933 addl #NBPG,d1 | increment page frame number 934 cmpl a2,a0 | done yet? 935 jcs Lipt4 | no, keep going 936/* record base KVA of IO spaces which are just before Sysmap */ 937 RELOC(_Sysmap, a0) 938 movl a0@,d0 | Sysmap VA 939 subl #EIOMAPSIZE*NBPG,d0 | back up size of external IO space 940 RELOC(_extiobase, a0) 941 movl d0,a0@ | and record 942 RELOC(_intiolimit, a0) 943 movl d0,a0@ | external base is also internal limit 944 subl #IIOMAPSIZE*NBPG,d0 | back up size of internal IO space 945 RELOC(_intiobase, a0) 946 movl d0,a0@ | and record 947/* also record base of clock and MMU registers for fast access */ 948 addl #CLKBASE,d0 949 RELOC(_CLKbase, a0) 950 movl d0,a0@ 951 subl #CLKBASE,d0 952 addl #MMUBASE,d0 953 RELOC(_MMUbase, a0) 954 movl d0,a0@ 955 956/* 957 * Setup page table for process 0. 958 * 959 * We set up page table access for the kernel via Usrptmap (usrpt) 960 * and access to the u-area itself via Umap (u). First available 961 * page (VA: d5, PA: a4) is used for proc0 page table. Next UPAGES 962 * pages following are for u-area. 963 */ 964 movl a4,d0 965 movl d0,d1 966 andl #PG_FRAME,d1 | mask to page frame number 967 orl #PG_RW+PG_V,d1 | RW and valid 968 movl d1,d4 | remember for later Usrptmap load 969 movl d0,a0 | base of proc0 PT 970 addl #NBPG,d0 | plus one page yields base of u-area 971 movl d0,a2 | and end of PT 972 addl #NBPG,d5 | keep VA in sync 973/* invalidate entire page table */ 974Liudot1: 975 movl #PG_NV,a0@+ | invalidate PTE 976 cmpl a2,a0 | done yet? 977 jcs Liudot1 | no, keep going 978/* now go back and validate u-area PTEs in PT and in Umap */ 979 lea a0@(-HIGHPAGES*4),a0 | base of PTEs for u-area (p_addr) 980 lea a0@(UPAGES*4),a1 | end of PTEs for u-area 981 lea a4@(-HIGHPAGES*4),a3 | u-area PTE base in Umap PT 982 movl d0,d1 | get base of u-area 983 andl #PG_FRAME,d1 | mask to page frame number 984 orl #PG_RW+PG_V,d1 | add valid and writable 985Liudot2: 986 movl d1,a0@+ | validate p_addr PTE 987 movl d1,a3@+ | validate u PTE 988 addl #NBPG,d1 | to next page 989 cmpl a1,a0 | done yet? 990 jcs Liudot2 | no, keep going 991/* clear process 0 u-area */ 992 addl #NBPG*UPAGES,d0 | end of u-area 993Lclru1: 994 clrl a2@+ | clear 995 cmpl d0,a2 | done yet? 996 jcs Lclru1 | no, keep going 997 movl a2,a4 | save phys addr of first avail page 998 RELOC(_proc0paddr, a0) 999 movl d5,a0@ | save KVA of proc0 u-area 1000 addl #UPAGES*NBPG,d5 | increment virtual addr as well 1001 1002/* 1003 * Prepare to enable MMU. 1004 * Since the kernel is not mapped logical == physical we must insure 1005 * that when the MMU is turned on, all prefetched addresses (including 1006 * the PC) are valid. In order guarentee that, we use the last physical 1007 * page (which is conveniently mapped == VA) and load it up with enough 1008 * code to defeat the prefetch, then we execute the jump back to here. 1009 * 1010 * Is this all really necessary, or am I paranoid?? 1011 */ 1012 RELOC(_Sysseg, a0) | system segment table addr 1013 movl a0@,a1 | read value (a KVA) 1014 addl a5,a1 | convert to PA 1015 RELOC(_mmutype, a0) 1016 tstl a0@ | HP MMU? 1017 jeq Lhpmmu2 | yes, skip 1018 RELOC(_protorp, a0) 1019 movl #0x80000202,a0@ | nolimit + share global + 4 byte PTEs 1020 movl a1,a0@(4) | + segtable address 1021 pmove a0@,srp | load the supervisor root pointer 1022 movl #0x80000002,a0@ | reinit upper half for CRP loads 1023 jra Lstploaddone | done 1024Lhpmmu2: 1025 movl a1,d1 1026 moveq #PGSHIFT,d2 1027 lsrl d2,d1 | convert to page frame 1028 movl d1,INTIOBASE+MMUBASE+MMUSSTP | load in sysseg table register 1029Lstploaddone: 1030 lea MAXADDR,a2 | PA of last RAM page 1031 RELOC(Lhighcode, a1) | addr of high code 1032 RELOC(Lehighcode, a3) | end addr 1033Lcodecopy: 1034 movw a1@+,a2@+ | copy a word 1035 cmpl a3,a1 | done yet? 1036 jcs Lcodecopy | no, keep going 1037 jmp MAXADDR | go for it! 1038 1039Lhighcode: 1040 RELOC(_mmutype, a0) 1041 tstl a0@ | HP MMU? 1042 jeq Lhpmmu3 | yes, skip 1043 movl #MMU_IEN+MMU_FPE,INTIOBASE+MMUBASE+MMUCMD 1044 | enable 68881 and i-cache 1045 movl #0x82c0aa00,a2@ | value to load TC with 1046 pmove a2@,tc | load it 1047 jmp Lenab1 1048Lhpmmu3: 1049 movl #0,INTIOBASE+MMUBASE+MMUCMD | clear external cache 1050 movl #MMU_ENAB,INTIOBASE+MMUBASE+MMUCMD | turn on MMU 1051 jmp Lenab1 | jmp to mapped code 1052Lehighcode: 1053 1054/* 1055 * Should be running mapped from this point on 1056 */ 1057Lenab1: 1058/* check for internal HP-IB in SYSFLAG */ 1059 btst #5,0xfffffed2 | internal HP-IB? 1060 jeq Linitmem | yes, have HP-IB just continue 1061 clrl _internalhpib | no, clear associated address 1062/* init mem sizes */ 1063Linitmem: 1064 movl #MAXADDR,d1 | last page 1065 moveq #PGSHIFT,d2 1066 lsrl d2,d1 | convert to page (click) number 1067 movl d1,_maxmem | save as maxmem 1068 movl _lowram,d0 | lowram value from ROM via boot 1069 lsrl d2,d0 | convert to page number 1070 subl d0,d1 | compute amount of RAM present 1071 movl d1,_physmem | and physmem 1072/* 1073 * pmap_bootstrap is supposed to be called with mapping off early on 1074 * to set up the kernel VA space. However, this only works easily if 1075 * you have a kernel PA == VA mapping. Since we do not, we just set 1076 * up and enable mapping here and then call the bootstrap routine to 1077 * get the pmap module in sync with reality. 1078 */ 1079 .globl _avail_start 1080 lea tmpstk,sp | temporary stack 1081 movl a5,sp@- | phys load address (assumes VA 0) 1082 movl a4,sp@- | first available PA 1083 jbsr _pmap_bootstrap | sync up pmap module 1084 addql #8,sp 1085| movl _avail_start,a4 | pmap_bootstrap may need RAM 1086/* set kernel stack, user SP, and initial pcb */ 1087 lea _kstack,a1 | proc0 kernel stack 1088 lea a1@(UPAGES*NBPG-4),sp | set kernel stack to end of area 1089 movl #USRSTACK-4,a2 1090 movl a2,usp | init user SP 1091 movl _proc0paddr,a1 | get proc0 pcb addr 1092 movl a1,_curpcb | proc0 is running 1093 clrw a1@(PCB_FLAGS) | clear flags 1094#ifdef FPCOPROC 1095 clrl a1@(PCB_FPCTX) | ensure null FP context 1096 movl a1,sp@- 1097 jbsr _m68881_restore | restore it (does not kill a1) 1098 addql #4,sp 1099#endif 1100/* flush TLB and turn on caches */ 1101 jbsr _TBIA | invalidate TLB 1102 movl #CACHE_ON,d0 1103 movc d0,cacr | clear cache(s) 1104 tstl _ectype 1105 jeq Lnocache0 1106 MMUADDR(a0) 1107 orl #MMU_CEN,a0@(MMUCMD) | turn on external cache 1108Lnocache0: 1109/* final setup for C code */ 1110 movw #PSL_LOWIPL,sr | lower SPL 1111 movl d7,_boothowto | save reboot flags 1112 movl d6,_bootdev | and boot device 1113 jbsr _main | call main() 1114 1115/* proc[1] == init now running here; 1116 * create a null exception frame and return to user mode in icode 1117 */ 1118 clrw sp@- | vector offset/frame type 1119 clrl sp@- | return to icode location 0 1120 movw #PSL_USER,sp@- | in user mode 1121 rte 1122 1123/* 1124 * Signal "trampoline" code (18 bytes). Invoked from RTE setup by sendsig(). 1125 * 1126 * Stack looks like: 1127 * 1128 * sp+0 -> signal number 1129 * sp+4 signal specific code 1130 * sp+8 pointer to signal context frame (scp) 1131 * sp+12 address of handler 1132 * sp+16 saved hardware state 1133 * . 1134 * . 1135 * scp+0-> beginning of signal context frame 1136 */ 1137 .globl _sigcode, _esigcode 1138 .data 1139_sigcode: 1140 movl sp@(12),a0 | signal handler addr (4 bytes) 1141 jsr a0@ | call signal handler (2 bytes) 1142 addql #4,sp | pop signo (2 bytes) 1143 trap #1 | special syscall entry (2 bytes) 1144 movl d0,sp@(4) | save errno (4 bytes) 1145 moveq #1,d0 | syscall == exit (2 bytes) 1146 trap #0 | exit(errno) (2 bytes) 1147 .align 2 1148_esigcode: 1149 1150/* 1151 * Icode is copied out to process 1 to exec init. 1152 * If the exec fails, process 1 exits. 1153 */ 1154 .globl _icode,_szicode 1155 .text 1156_icode: 1157 clrl sp@- 1158 pea pc@((argv-.)+2) 1159 pea pc@((init-.)+2) 1160 clrl sp@- 1161 moveq #SYS_execve,d0 1162 trap #0 1163 moveq #SYS_exit,d0 1164 trap #0 1165init: 1166 .asciz "/sbin/init" 1167 .even 1168argv: 1169 .long init+6-_icode | argv[0] = "init" ("/sbin/init" + 6) 1170 .long eicode-_icode | argv[1] follows icode after copyout 1171 .long 0 1172eicode: 1173 1174_szicode: 1175 .long _szicode-_icode 1176 1177/* 1178 * Primitives 1179 */ 1180 1181#ifdef GPROF 1182#define ENTRY(name) \ 1183 .globl _/**/name; _/**/name: link a6,#0; jbsr mcount; unlk a6 1184#define ALTENTRY(name, rname) \ 1185 ENTRY(name); jra rname+12 1186#else 1187#define ENTRY(name) \ 1188 .globl _/**/name; _/**/name: 1189#define ALTENTRY(name, rname) \ 1190 .globl _/**/name; _/**/name: 1191#endif 1192 1193/* 1194 * update profiling information for the user 1195 * addupc(pc, &u.u_prof, ticks) 1196 */ 1197ENTRY(addupc) 1198 movl a2,sp@- | scratch register 1199 movl sp@(12),a2 | get &u.u_prof 1200 movl sp@(8),d0 | get user pc 1201 subl a2@(8),d0 | pc -= pr->pr_off 1202 jlt Lauexit | less than 0, skip it 1203 movl a2@(12),d1 | get pr->pr_scale 1204 lsrl #1,d0 | pc /= 2 1205 lsrl #1,d1 | scale /= 2 1206 mulul d1,d0 | pc /= scale 1207 moveq #14,d1 1208 lsrl d1,d0 | pc >>= 14 1209 bclr #0,d0 | pc &= ~1 1210 cmpl a2@(4),d0 | too big for buffer? 1211 jge Lauexit | yes, screw it 1212 addl a2@,d0 | no, add base 1213 movl d0,sp@- | push address 1214 jbsr _fusword | grab old value 1215 movl sp@+,a0 | grab address back 1216 cmpl #-1,d0 | access ok 1217 jeq Lauerror | no, skip out 1218 addw sp@(18),d0 | add tick to current value 1219 movl d0,sp@- | push value 1220 movl a0,sp@- | push address 1221 jbsr _susword | write back new value 1222 addql #8,sp | pop params 1223 tstl d0 | fault? 1224 jeq Lauexit | no, all done 1225Lauerror: 1226 clrl a2@(12) | clear scale (turn off prof) 1227Lauexit: 1228 movl sp@+,a2 | restore scratch reg 1229 rts 1230 1231/* 1232 * copyinstr(fromaddr, toaddr, maxlength, &lencopied) 1233 * 1234 * Copy a null terminated string from the user address space into 1235 * the kernel address space. 1236 * NOTE: maxlength must be < 64K 1237 */ 1238ENTRY(copyinstr) 1239 movl _curpcb,a0 | current pcb 1240 movl #Lcisflt1,a0@(PCB_ONFAULT) | set up to catch faults 1241 movl sp@(4),a0 | a0 = fromaddr 1242 movl sp@(8),a1 | a1 = toaddr 1243 moveq #0,d0 1244 movw sp@(14),d0 | d0 = maxlength 1245 jlt Lcisflt1 | negative count, error 1246 jeq Lcisdone | zero count, all done 1247 subql #1,d0 | set up for dbeq 1248Lcisloop: 1249 movsb a0@+,d1 | grab a byte 1250 movb d1,a1@+ | copy it 1251 dbeq d0,Lcisloop | if !null and more, continue 1252 jne Lcisflt2 | ran out of room, error 1253 moveq #0,d0 | got a null, all done 1254Lcisdone: 1255 tstl sp@(16) | return length desired? 1256 jeq Lcisret | no, just return 1257 subl sp@(4),a0 | determine how much was copied 1258 movl sp@(16),a1 | return location 1259 movl a0,a1@ | stash it 1260Lcisret: 1261 movl _curpcb,a0 | current pcb 1262 clrl a0@(PCB_ONFAULT) | clear fault addr 1263 rts 1264Lcisflt1: 1265 moveq #EFAULT,d0 | copy fault 1266 jra Lcisdone 1267Lcisflt2: 1268 moveq #ENAMETOOLONG,d0 | ran out of space 1269 jra Lcisdone 1270 1271/* 1272 * copyoutstr(fromaddr, toaddr, maxlength, &lencopied) 1273 * 1274 * Copy a null terminated string from the kernel 1275 * address space to the user address space. 1276 * NOTE: maxlength must be < 64K 1277 */ 1278ENTRY(copyoutstr) 1279 movl _curpcb,a0 | current pcb 1280 movl #Lcosflt1,a0@(PCB_ONFAULT) | set up to catch faults 1281 movl sp@(4),a0 | a0 = fromaddr 1282 movl sp@(8),a1 | a1 = toaddr 1283 moveq #0,d0 1284 movw sp@(14),d0 | d0 = maxlength 1285 jlt Lcosflt1 | negative count, error 1286 jeq Lcosdone | zero count, all done 1287 subql #1,d0 | set up for dbeq 1288Lcosloop: 1289 movb a0@+,d1 | grab a byte 1290 movsb d1,a1@+ | copy it 1291 dbeq d0,Lcosloop | if !null and more, continue 1292 jne Lcosflt2 | ran out of room, error 1293 moveq #0,d0 | got a null, all done 1294Lcosdone: 1295 tstl sp@(16) | return length desired? 1296 jeq Lcosret | no, just return 1297 subl sp@(4),a0 | determine how much was copied 1298 movl sp@(16),a1 | return location 1299 movl a0,a1@ | stash it 1300Lcosret: 1301 movl _curpcb,a0 | current pcb 1302 clrl a0@(PCB_ONFAULT) | clear fault addr 1303 rts 1304Lcosflt1: 1305 moveq #EFAULT,d0 | copy fault 1306 jra Lcosdone 1307Lcosflt2: 1308 moveq #ENAMETOOLONG,d0 | ran out of space 1309 jra Lcosdone 1310 1311/* 1312 * copystr(fromaddr, toaddr, maxlength, &lencopied) 1313 * 1314 * Copy a null terminated string from one point to another in 1315 * the kernel address space. 1316 * NOTE: maxlength must be < 64K 1317 */ 1318ENTRY(copystr) 1319 movl sp@(4),a0 | a0 = fromaddr 1320 movl sp@(8),a1 | a1 = toaddr 1321 moveq #0,d0 1322 movw sp@(14),d0 | d0 = maxlength 1323 jlt Lcsflt1 | negative count, error 1324 jeq Lcsdone | zero count, all done 1325 subql #1,d0 | set up for dbeq 1326Lcsloop: 1327 movb a0@+,a1@+ | copy a byte 1328 dbeq d0,Lcsloop | if !null and more, continue 1329 jne Lcsflt2 | ran out of room, error 1330 moveq #0,d0 | got a null, all done 1331Lcsdone: 1332 tstl sp@(16) | return length desired? 1333 jeq Lcsret | no, just return 1334 subl sp@(4),a0 | determine how much was copied 1335 movl sp@(16),a1 | return location 1336 movl a0,a1@ | stash it 1337Lcsret: 1338 rts 1339Lcsflt1: 1340 moveq #EFAULT,d0 | copy fault 1341 jra Lcsdone 1342Lcsflt2: 1343 moveq #ENAMETOOLONG,d0 | ran out of space 1344 jra Lcsdone 1345 1346/* 1347 * Copyin(from, to, len) 1348 * 1349 * Copy specified amount of data from user space into the kernel. 1350 * NOTE: len must be < 64K 1351 */ 1352ENTRY(copyin) 1353 movl d2,sp@- | scratch register 1354 movl _curpcb,a0 | current pcb 1355 movl #Lciflt,a0@(PCB_ONFAULT) | set up to catch faults 1356 movl sp@(16),d2 | check count 1357 jlt Lciflt | negative, error 1358 jeq Lcidone | zero, done 1359 movl sp@(8),a0 | src address 1360 movl sp@(12),a1 | dest address 1361 movl a0,d0 1362 btst #0,d0 | src address odd? 1363 jeq Lcieven | no, go check dest 1364 movsb a0@+,d1 | yes, get a byte 1365 movb d1,a1@+ | put a byte 1366 subql #1,d2 | adjust count 1367 jeq Lcidone | exit if done 1368Lcieven: 1369 movl a1,d0 1370 btst #0,d0 | dest address odd? 1371 jne Lcibyte | yes, must copy by bytes 1372 movl d2,d0 | no, get count 1373 lsrl #2,d0 | convert to longwords 1374 jeq Lcibyte | no longwords, copy bytes 1375 subql #1,d0 | set up for dbf 1376Lcilloop: 1377 movsl a0@+,d1 | get a long 1378 movl d1,a1@+ | put a long 1379 dbf d0,Lcilloop | til done 1380 andl #3,d2 | what remains 1381 jeq Lcidone | all done 1382Lcibyte: 1383 subql #1,d2 | set up for dbf 1384Lcibloop: 1385 movsb a0@+,d1 | get a byte 1386 movb d1,a1@+ | put a byte 1387 dbf d2,Lcibloop | til done 1388Lcidone: 1389 moveq #0,d0 | success 1390Lciexit: 1391 movl _curpcb,a0 | current pcb 1392 clrl a0@(PCB_ONFAULT) | clear fault catcher 1393 movl sp@+,d2 | restore scratch reg 1394 rts 1395Lciflt: 1396 moveq #EFAULT,d0 | got a fault 1397 jra Lciexit 1398 1399/* 1400 * Copyout(from, to, len) 1401 * 1402 * Copy specified amount of data from kernel to the user space 1403 * NOTE: len must be < 64K 1404 */ 1405ENTRY(copyout) 1406 movl d2,sp@- | scratch register 1407 movl _curpcb,a0 | current pcb 1408 movl #Lcoflt,a0@(PCB_ONFAULT) | catch faults 1409 movl sp@(16),d2 | check count 1410 jlt Lcoflt | negative, error 1411 jeq Lcodone | zero, done 1412 movl sp@(8),a0 | src address 1413 movl sp@(12),a1 | dest address 1414 movl a0,d0 1415 btst #0,d0 | src address odd? 1416 jeq Lcoeven | no, go check dest 1417 movb a0@+,d1 | yes, get a byte 1418 movsb d1,a1@+ | put a byte 1419 subql #1,d2 | adjust count 1420 jeq Lcodone | exit if done 1421Lcoeven: 1422 movl a1,d0 1423 btst #0,d0 | dest address odd? 1424 jne Lcobyte | yes, must copy by bytes 1425 movl d2,d0 | no, get count 1426 lsrl #2,d0 | convert to longwords 1427 jeq Lcobyte | no longwords, copy bytes 1428 subql #1,d0 | set up for dbf 1429Lcolloop: 1430 movl a0@+,d1 | get a long 1431 movsl d1,a1@+ | put a long 1432 dbf d0,Lcolloop | til done 1433 andl #3,d2 | what remains 1434 jeq Lcodone | all done 1435Lcobyte: 1436 subql #1,d2 | set up for dbf 1437Lcobloop: 1438 movb a0@+,d1 | get a byte 1439 movsb d1,a1@+ | put a byte 1440 dbf d2,Lcobloop | til done 1441Lcodone: 1442 moveq #0,d0 | success 1443Lcoexit: 1444 movl _curpcb,a0 | current pcb 1445 clrl a0@(PCB_ONFAULT) | clear fault catcher 1446 movl sp@+,d2 | restore scratch reg 1447 rts 1448Lcoflt: 1449 moveq #EFAULT,d0 | got a fault 1450 jra Lcoexit 1451 1452/* 1453 * non-local gotos 1454 */ 1455ENTRY(setjmp) 1456 movl sp@(4),a0 | savearea pointer 1457 moveml #0xFCFC,a0@ | save d2-d7/a2-a7 1458 movl sp@,a0@(48) | and return address 1459 moveq #0,d0 | return 0 1460 rts 1461 1462ENTRY(qsetjmp) 1463 movl sp@(4),a0 | savearea pointer 1464 lea a0@(40),a0 | skip regs we do not save 1465 movl a6,a0@+ | save FP 1466 movl sp,a0@+ | save SP 1467 movl sp@,a0@ | and return address 1468 moveq #0,d0 | return 0 1469 rts 1470 1471ENTRY(longjmp) 1472 movl sp@(4),a0 1473 moveml a0@+,#0xFCFC 1474 movl a0@,sp@ 1475 moveq #1,d0 1476 rts 1477 1478/* 1479 * The following primitives manipulate the run queues. 1480 * _whichqs tells which of the 32 queues _qs 1481 * have processes in them. Setrq puts processes into queues, Remrq 1482 * removes them from queues. The running process is on no queue, 1483 * other processes are on a queue related to p->p_pri, divided by 4 1484 * actually to shrink the 0-127 range of priorities into the 32 available 1485 * queues. 1486 */ 1487 1488 .globl _whichqs,_qs,_cnt,_panic 1489 .globl _curproc 1490 .comm _want_resched,4 1491 1492/* 1493 * Setrq(p) 1494 * 1495 * Call should be made at spl6(), and p->p_stat should be SRUN 1496 */ 1497ENTRY(setrq) 1498 movl sp@(4),a0 1499 tstl a0@(P_RLINK) 1500 jeq Lset1 1501 movl #Lset2,sp@- 1502 jbsr _panic 1503Lset1: 1504 clrl d0 1505 movb a0@(P_PRI),d0 1506 lsrb #2,d0 1507 movl _whichqs,d1 1508 bset d0,d1 1509 movl d1,_whichqs 1510 lslb #3,d0 1511 addl #_qs,d0 1512 movl d0,a0@(P_LINK) 1513 movl d0,a1 1514 movl a1@(P_RLINK),a0@(P_RLINK) 1515 movl a0,a1@(P_RLINK) 1516 movl a0@(P_RLINK),a1 1517 movl a0,a1@(P_LINK) 1518 rts 1519 1520Lset2: 1521 .asciz "setrq" 1522 .even 1523 1524/* 1525 * Remrq(p) 1526 * 1527 * Call should be made at spl6(). 1528 */ 1529ENTRY(remrq) 1530 movl sp@(4),a0 1531 clrl d0 1532 movb a0@(P_PRI),d0 1533 lsrb #2,d0 1534 movl _whichqs,d1 1535 bclr d0,d1 1536 jne Lrem1 1537 movl #Lrem3,sp@- 1538 jbsr _panic 1539Lrem1: 1540 movl d1,_whichqs 1541 movl a0@(P_LINK),a1 1542 movl a0@(P_RLINK),a1@(P_RLINK) 1543 movl a0@(P_RLINK),a1 1544 movl a0@(P_LINK),a1@(P_LINK) 1545 movl #_qs,a1 1546 movl d0,d1 1547 lslb #3,d1 1548 addl d1,a1 1549 cmpl a1@(P_LINK),a1 1550 jeq Lrem2 1551 movl _whichqs,d1 1552 bset d0,d1 1553 movl d1,_whichqs 1554Lrem2: 1555 clrl a0@(P_RLINK) 1556 rts 1557 1558Lrem3: 1559 .asciz "remrq" 1560Lsw0: 1561 .asciz "swtch" 1562 .even 1563 1564 .globl _curpcb 1565 .globl _masterpaddr | XXX compatibility (debuggers) 1566 .data 1567_masterpaddr: | XXX compatibility (debuggers) 1568_curpcb: 1569 .long 0 1570pcbflag: 1571 .byte 0 | copy of pcb_flags low byte 1572 .align 2 1573 .comm nullpcb,SIZEOF_PCB 1574 .text 1575 1576/* 1577 * At exit of a process, do a swtch for the last time. 1578 * The mapping of the pcb at p->p_addr has already been deleted, 1579 * and the memory for the pcb+stack has been freed. 1580 * The ipl is high enough to prevent the memory from being reallocated. 1581 */ 1582ENTRY(swtch_exit) 1583 movl #nullpcb,_curpcb | save state into garbage pcb 1584 lea tmpstk,sp | goto a tmp stack 1585 jra _swtch 1586 1587/* 1588 * When no processes are on the runq, Swtch branches to idle 1589 * to wait for something to come ready. 1590 */ 1591 .globl Idle 1592Lidle: 1593 stop #PSL_LOWIPL 1594Idle: 1595idle: 1596 movw #PSL_HIGHIPL,sr 1597 tstl _whichqs 1598 jeq Lidle 1599 movw #PSL_LOWIPL,sr 1600 jra Lsw1 1601 1602Lbadsw: 1603 movl #Lsw0,sp@- 1604 jbsr _panic 1605 /*NOTREACHED*/ 1606 1607/* 1608 * Swtch() 1609 * 1610 * NOTE: On the mc68851 (318/319/330) we attempt to avoid flushing the 1611 * entire ATC. The effort involved in selective flushing may not be 1612 * worth it, maybe we should just flush the whole thing? 1613 * 1614 * NOTE 2: With the new VM layout we now no longer know if an inactive 1615 * user's PTEs have been changed (formerly denoted by the SPTECHG p_flag 1616 * bit). For now, we just always flush the full ATC. 1617 */ 1618ENTRY(swtch) 1619 movl _curpcb,a0 | current pcb 1620 movw sr,a0@(PCB_PS) | save sr before changing ipl 1621#ifdef notyet 1622 movl _curproc,sp@- | remember last proc running 1623#endif 1624 clrl _curproc 1625 addql #1,_cnt+V_SWTCH 1626Lsw1: 1627 /* 1628 * Find the highest-priority queue that isn't empty, 1629 * then take the first proc from that queue. 1630 */ 1631 clrl d0 1632 lea _whichqs,a0 1633 movl a0@,d1 1634Lswchk: 1635 btst d0,d1 1636 jne Lswfnd 1637 addqb #1,d0 1638 cmpb #32,d0 1639 jne Lswchk 1640 jra idle 1641Lswfnd: 1642 movw #PSL_HIGHIPL,sr | lock out interrupts 1643 movl a0@,d1 | and check again... 1644 bclr d0,d1 1645 jeq Lsw1 | proc moved, rescan 1646 movl d1,a0@ | update whichqs 1647 moveq #1,d1 | double check for higher priority 1648 lsll d0,d1 | process (which may have snuck in 1649 subql #1,d1 | while we were finding this one) 1650 andl a0@,d1 1651 jeq Lswok | no one got in, continue 1652 movl a0@,d1 1653 bset d0,d1 | otherwise put this one back 1654 movl d1,a0@ 1655 jra Lsw1 | and rescan 1656Lswok: 1657 movl d0,d1 1658 lslb #3,d1 | convert queue number to index 1659 addl #_qs,d1 | locate queue (q) 1660 movl d1,a1 1661 cmpl a1@(P_LINK),a1 | anyone on queue? 1662 jeq Lbadsw | no, panic 1663 movl a1@(P_LINK),a0 | p = q->p_link 1664 movl a0@(P_LINK),a1@(P_LINK) | q->p_link = p->p_link 1665 movl a0@(P_LINK),a1 | q = p->p_link 1666 movl a0@(P_RLINK),a1@(P_RLINK) | q->p_rlink = p->p_rlink 1667 cmpl a0@(P_LINK),d1 | anyone left on queue? 1668 jeq Lsw2 | no, skip 1669 movl _whichqs,d1 1670 bset d0,d1 | yes, reset bit 1671 movl d1,_whichqs 1672Lsw2: 1673 movl a0,_curproc 1674 clrl _want_resched 1675#ifdef notyet 1676 movl sp@+,a1 1677 cmpl a0,a1 | switching to same proc? 1678 jeq Lswdone | yes, skip save and restore 1679#endif 1680 /* 1681 * Save state of previous process in its pcb. 1682 */ 1683 movl _curpcb,a1 1684 moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers 1685 movl usp,a2 | grab USP (a2 has been saved) 1686 movl a2,a1@(PCB_USP) | and save it 1687 movl _CMAP2,a1@(PCB_CMAP2) | save temporary map PTE 1688#ifdef FPCOPROC 1689 lea a1@(PCB_FPCTX),a2 | pointer to FP save area 1690 fsave a2@ | save FP state 1691 tstb a2@ | null state frame? 1692 jeq Lswnofpsave | yes, all done 1693 fmovem fp0-fp7,a2@(216) | save FP general registers 1694 fmovem fpcr/fpsr/fpi,a2@(312) | save FP control registers 1695Lswnofpsave: 1696#endif 1697 1698#ifdef DIAGNOSTIC 1699 tstl a0@(P_WCHAN) 1700 jne Lbadsw 1701 cmpb #SRUN,a0@(P_STAT) 1702 jne Lbadsw 1703#endif 1704 clrl a0@(P_RLINK) | clear back link 1705 movl a0@(P_ADDR),a1 | get p_addr 1706 movl a1,_curpcb 1707 movb a1@(PCB_FLAGS+1),pcbflag | copy of pcb_flags low byte 1708 1709 /* see if pmap_activate needs to be called; should remove this */ 1710 movl a0@(P_VMSPACE),a0 | vmspace = p->p_vmspace 1711#ifdef DIAGNOSTIC 1712 tstl a0 | map == VM_MAP_NULL? 1713 jeq Lbadsw | panic 1714#endif 1715 lea a0@(VM_PMAP),a0 | pmap = &vmspace.vm_pmap 1716 tstl a0@(PM_STCHG) | pmap->st_changed? 1717 jeq Lswnochg | no, skip 1718 pea a1@ | push pcb (at p_addr) 1719 pea a0@ | push pmap 1720 jbsr _pmap_activate | pmap_activate(pmap, pcb) 1721 addql #8,sp 1722 movl _curpcb,a1 | restore p_addr 1723Lswnochg: 1724 1725#ifdef PROFTIMER 1726#ifdef notdef 1727 movw #SPL6,sr | protect against clock interrupts 1728#endif 1729 bclr #0,_profon | clear user profiling bit, was set? 1730 jeq Lskipoff | no, clock off or doing kernel only 1731#ifdef GPROF 1732 tstb _profon | kernel profiling also enabled? 1733 jlt Lskipoff | yes, nothing more to do 1734#endif 1735 CLKADDR(a0) 1736 movb #0,a0@(CLKCR2) | no, just user, select CR3 1737 movb #0,a0@(CLKCR3) | and turn it off 1738Lskipoff: 1739#endif 1740 movl #PGSHIFT,d1 1741 movl a1,d0 1742 lsrl d1,d0 | convert p_addr to page number 1743 lsll #2,d0 | and now to Systab offset 1744 addl _Sysmap,d0 | add Systab base to get PTE addr 1745#ifdef notdef 1746 movw #PSL_HIGHIPL,sr | go crit while changing PTEs 1747#endif 1748 lea tmpstk,sp | now goto a tmp stack for NMI 1749 movl d0,a0 | address of new context 1750 movl _Umap,a2 | address of PTEs for kstack 1751 moveq #UPAGES-1,d0 | sizeof kstack 1752Lres1: 1753 movl a0@+,d1 | get PTE 1754 andl #~PG_PROT,d1 | mask out old protection 1755 orl #PG_RW+PG_V,d1 | ensure valid and writable 1756 movl d1,a2@+ | load it up 1757 dbf d0,Lres1 | til done 1758 movl #CACHE_CLR,d0 1759 movc d0,cacr | invalidate cache(s) 1760#if defined(HP330) || defined(HP360) || defined(HP370) 1761 tstl _mmutype | HP MMU? 1762 jeq Lhpmmu4 | yes, skip 1763 pflusha | flush entire TLB 1764 movl a1@(PCB_USTP),d0 | get USTP 1765 moveq #PGSHIFT,d1 1766 lsll d1,d0 | convert to addr 1767 lea _protorp,a0 | CRP prototype 1768 movl d0,a0@(4) | stash USTP 1769 pmove a0@,crp | load new user root pointer 1770 jra Lcxswdone | thats it 1771Lhpmmu4: 1772#endif 1773#if defined(HP320) || defined(HP350) 1774 MMUADDR(a0) 1775 movl a0@(MMUTBINVAL),d1 | invalidate TLB 1776 tstl _ectype | got external VAC? 1777 jle Lnocache1 | no, skip 1778 andl #~MMU_CEN,a0@(MMUCMD) | toggle cache enable 1779 orl #MMU_CEN,a0@(MMUCMD) | to clear data cache 1780Lnocache1: 1781 movl a1@(PCB_USTP),a0@(MMUUSTP) | context switch 1782#endif 1783Lcxswdone: 1784 movl a1@(PCB_CMAP2),_CMAP2 | reload tmp map 1785 moveml a1@(PCB_REGS),#0xFCFC | and registers 1786 movl a1@(PCB_USP),a0 1787 movl a0,usp | and USP 1788#ifdef PROFTIMER 1789 tstl a1@(U_PROFSCALE) | process being profiled? 1790 jeq Lskipon | no, do nothing 1791 orb #1,_profon | turn on user profiling bit 1792#ifdef GPROF 1793 jlt Lskipon | already profiling kernel, all done 1794#endif 1795 CLKADDR(a0) 1796 movl _profint,d1 | profiling interval 1797 subql #1,d1 | adjusted 1798 movepw d1,a0@(CLKMSB3) | set interval 1799 movb #0,a0@(CLKCR2) | select CR3 1800 movb #64,a0@(CLKCR3) | turn it on 1801Lskipon: 1802#endif 1803#ifdef FPCOPROC 1804 lea a1@(PCB_FPCTX),a0 | pointer to FP save area 1805 tstb a0@ | null state frame? 1806 jeq Lresfprest | yes, easy 1807 fmovem a0@(312),fpcr/fpsr/fpi | restore FP control registers 1808 fmovem a0@(216),fp0-fp7 | restore FP general registers 1809Lresfprest: 1810 frestore a0@ | restore state 1811#endif 1812 movw a1@(PCB_PS),sr | no, restore PS 1813 moveq #1,d0 | return 1 (for alternate returns) 1814 rts 1815 1816/* 1817 * savectx(pcb, altreturn) 1818 * Update pcb, saving current processor state and arranging 1819 * for alternate return ala longjmp in swtch if altreturn is true. 1820 */ 1821ENTRY(savectx) 1822 movl sp@(4),a1 1823 movw sr,a1@(PCB_PS) 1824 movl usp,a0 | grab USP 1825 movl a0,a1@(PCB_USP) | and save it 1826 moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers 1827 movl _CMAP2,a1@(PCB_CMAP2) | save temporary map PTE 1828#ifdef FPCOPROC 1829 lea a1@(PCB_FPCTX),a0 | pointer to FP save area 1830 fsave a0@ | save FP state 1831 tstb a0@ | null state frame? 1832 jeq Lsvnofpsave | yes, all done 1833 fmovem fp0-fp7,a0@(216) | save FP general registers 1834 fmovem fpcr/fpsr/fpi,a0@(312) | save FP control registers 1835Lsvnofpsave: 1836#endif 1837 tstl sp@(8) | altreturn? 1838 jeq Lsavedone 1839 movl sp,d0 | relocate current sp relative to a1 1840 subl #_kstack,d0 | (sp is relative to kstack): 1841 addl d0,a1 | a1 += sp - kstack; 1842 movl sp@,a1@ | write return pc at (relocated) sp@ 1843Lsavedone: 1844 moveq #0,d0 | return 0 1845 rts 1846 1847/* 1848 * {fu,su},{byte,sword,word} 1849 */ 1850ALTENTRY(fuiword, _fuword) 1851ENTRY(fuword) 1852 movl sp@(4),a0 | address to read 1853 movl _curpcb,a1 | current pcb 1854 movl #Lfserr,a1@(PCB_ONFAULT) | where to return to on a fault 1855 movsl a0@,d0 | do read from user space 1856 jra Lfsdone 1857 1858ENTRY(fusword) 1859 movl sp@(4),a0 1860 movl _curpcb,a1 | current pcb 1861 movl #Lfserr,a1@(PCB_ONFAULT) | where to return to on a fault 1862 moveq #0,d0 1863 movsw a0@,d0 | do read from user space 1864 jra Lfsdone 1865 1866ALTENTRY(fuibyte, _fubyte) 1867ENTRY(fubyte) 1868 movl sp@(4),a0 | address to read 1869 movl _curpcb,a1 | current pcb 1870 movl #Lfserr,a1@(PCB_ONFAULT) | where to return to on a fault 1871 moveq #0,d0 1872 movsb a0@,d0 | do read from user space 1873 jra Lfsdone 1874 1875Lfserr: 1876 moveq #-1,d0 | error indicator 1877Lfsdone: 1878 clrl a1@(PCB_ONFAULT) | clear fault address 1879 rts 1880 1881ALTENTRY(suiword, _suword) 1882ENTRY(suword) 1883 movl sp@(4),a0 | address to write 1884 movl sp@(8),d0 | value to put there 1885 movl _curpcb,a1 | current pcb 1886 movl #Lfserr,a1@(PCB_ONFAULT) | where to return to on a fault 1887 movsl d0,a0@ | do write to user space 1888 moveq #0,d0 | indicate no fault 1889 jra Lfsdone 1890 1891ENTRY(susword) 1892 movl sp@(4),a0 | address to write 1893 movw sp@(10),d0 | value to put there 1894 movl _curpcb,a1 | current pcb 1895 movl #Lfserr,a1@(PCB_ONFAULT) | where to return to on a fault 1896 movsw d0,a0@ | do write to user space 1897 moveq #0,d0 | indicate no fault 1898 jra Lfsdone 1899 1900ALTENTRY(suibyte, _subyte) 1901ENTRY(subyte) 1902 movl sp@(4),a0 | address to write 1903 movb sp@(11),d0 | value to put there 1904 movl _curpcb,a1 | current pcb 1905 movl #Lfserr,a1@(PCB_ONFAULT) | where to return to on a fault 1906 movsb d0,a0@ | do write to user space 1907 moveq #0,d0 | indicate no fault 1908 jra Lfsdone 1909 1910/* 1911 * Copy 1 relocation unit (NBPG bytes) 1912 * from user virtual address to physical address 1913 */ 1914ENTRY(copyseg) 1915 movl _curpcb,a1 | current pcb 1916 movl #Lcpydone,a1@(PCB_ONFAULT) | where to return to on a fault 1917 movl sp@(8),d0 | destination page number 1918 moveq #PGSHIFT,d1 1919 lsll d1,d0 | convert to address 1920 orl #PG_CI+PG_RW+PG_V,d0 | make sure valid and writable 1921 movl _CMAP2,a0 1922 movl _CADDR2,sp@- | destination kernel VA 1923 movl d0,a0@ | load in page table 1924 jbsr _TBIS | invalidate any old mapping 1925 addql #4,sp 1926 movl _CADDR2,a1 | destination addr 1927 movl sp@(4),a0 | source addr 1928 movl #NBPG/4-1,d0 | count 1929Lcpyloop: 1930 movsl a0@+,d1 | read longword 1931 movl d1,a1@+ | write longword 1932 dbf d0,Lcpyloop | continue until done 1933Lcpydone: 1934 movl _curpcb,a1 | current pcb 1935 clrl a1@(PCB_ONFAULT) | clear error catch 1936 rts 1937 1938/* 1939 * Copy 1 relocation unit (NBPG bytes) 1940 * from physical address to physical address 1941 */ 1942ENTRY(physcopyseg) 1943 movl sp@(4),d0 | source page number 1944 moveq #PGSHIFT,d1 1945 lsll d1,d0 | convert to address 1946 orl #PG_CI+PG_RW+PG_V,d0 | make sure valid and writable 1947 movl _CMAP1,a0 1948 movl d0,a0@ | load in page table 1949 movl _CADDR1,sp@- | destination kernel VA 1950 jbsr _TBIS | invalidate any old mapping 1951 addql #4,sp 1952 1953 movl sp@(8),d0 | destination page number 1954 moveq #PGSHIFT,d1 1955 lsll d1,d0 | convert to address 1956 orl #PG_CI+PG_RW+PG_V,d0 | make sure valid and writable 1957 movl _CMAP2,a0 1958 movl d0,a0@ | load in page table 1959 movl _CADDR2,sp@- | destination kernel VA 1960 jbsr _TBIS | invalidate any old mapping 1961 addql #4,sp 1962 1963 movl _CADDR1,a0 | source addr 1964 movl _CADDR2,a1 | destination addr 1965 movl #NBPG/4-1,d0 | count 1966Lpcpy: 1967 movl a0@+,a1@+ | copy longword 1968 dbf d0,Lpcpy | continue until done 1969 rts 1970 1971/* 1972 * zero out physical memory 1973 * specified in relocation units (NBPG bytes) 1974 */ 1975ENTRY(clearseg) 1976 movl sp@(4),d0 | destination page number 1977 moveq #PGSHIFT,d1 1978 lsll d1,d0 | convert to address 1979 orl #PG_CI+PG_RW+PG_V,d0 | make sure valid and writable 1980 movl _CMAP1,a0 1981 movl _CADDR1,sp@- | destination kernel VA 1982 movl d0,a0@ | load in page map 1983 jbsr _TBIS | invalidate any old mapping 1984 addql #4,sp 1985 movl _CADDR1,a1 | destination addr 1986 movl #NBPG/4-1,d0 | count 1987/* simple clear loop is fastest on 68020 */ 1988Lclrloop: 1989 clrl a1@+ | clear a longword 1990 dbf d0,Lclrloop | continue til done 1991 rts 1992 1993/* 1994 * Invalidate entire TLB. 1995 */ 1996ENTRY(TBIA) 1997__TBIA: 1998#if defined(HP330) || defined(HP360) || defined(HP370) 1999 tstl _mmutype | HP MMU? 2000 jeq Lhpmmu6 | yes, skip 2001 pflusha | flush entire TLB 2002#if defined(HP360) || defined(HP370) 2003 jpl Lmc68851a | 68851 implies no d-cache 2004 movl #DC_CLEAR,d0 2005 movc d0,cacr | invalidate on-chip d-cache 2006Lmc68851a: 2007#endif 2008 rts 2009Lhpmmu6: 2010#endif 2011#if defined(HP320) || defined(HP350) 2012 MMUADDR(a0) 2013 movl a0@(MMUTBINVAL),sp@- | do not ask me, this 2014 addql #4,sp | is how hpux does it 2015#ifdef DEBUG 2016 tstl fullcflush 2017 jne __DCIA | XXX: invalidate entire cache 2018#endif 2019#endif 2020 rts 2021 2022/* 2023 * Invalidate any TLB entry for given VA (TB Invalidate Single) 2024 */ 2025ENTRY(TBIS) 2026#ifdef DEBUG 2027 tstl fulltflush | being conservative? 2028 jne __TBIA | yes, flush entire TLB 2029#endif 2030#if defined(HP330) || defined(HP360) || defined(HP370) 2031 tstl _mmutype | HP MMU? 2032 jeq Lhpmmu5 | yes, skip 2033 movl sp@(4),a0 | get addr to flush 2034#if defined(HP360) || defined(HP370) 2035 jpl Lmc68851b | is 68851? 2036 pflush #0,#0,a0@ | flush address from both sides 2037 movl #DC_CLEAR,d0 2038 movc d0,cacr | invalidate on-chip data cache 2039 rts 2040Lmc68851b: 2041#endif 2042 pflushs #0,#0,a0@ | flush address from both sides 2043 rts 2044Lhpmmu5: 2045#endif 2046#if defined(HP320) || defined(HP350) 2047 movl sp@(4),d0 | VA to invalidate 2048 bclr #0,d0 | ensure even 2049 movl d0,a0 2050 movw sr,d1 | go critical 2051 movw #PSL_HIGHIPL,sr | while in purge space 2052 moveq #FC_PURGE,d0 | change address space 2053 movc d0,dfc | for destination 2054 moveq #0,d0 | zero to invalidate? 2055 movsl d0,a0@ | hit it 2056 moveq #FC_USERD,d0 | back to old 2057 movc d0,dfc | address space 2058 movw d1,sr | restore IPL 2059#endif 2060 rts 2061 2062/* 2063 * Invalidate supervisor side of TLB 2064 */ 2065ENTRY(TBIAS) 2066#ifdef DEBUG 2067 tstl fulltflush | being conservative? 2068 jne __TBIA | yes, flush everything 2069#endif 2070#if defined(HP330) || defined(HP360) || defined(HP370) 2071 tstl _mmutype | HP MMU? 2072 jeq Lhpmmu7 | yes, skip 2073#if defined(HP360) || defined(HP370) 2074 jpl Lmc68851c | 68851? 2075 pflush #4,#4 | flush supervisor TLB entries 2076 movl #DC_CLEAR,d0 2077 movc d0,cacr | invalidate on-chip d-cache 2078 rts 2079Lmc68851c: 2080#endif 2081 pflushs #4,#4 | flush supervisor TLB entries 2082 rts 2083Lhpmmu7: 2084#endif 2085#if defined(HP320) || defined(HP350) 2086 MMUADDR(a0) 2087 movl #0x8000,d0 | more 2088 movl d0,a0@(MMUTBINVAL) | HP magic 2089#ifdef DEBUG 2090 tstl fullcflush 2091 jne __DCIS | XXX: invalidate entire sup. cache 2092#endif 2093#endif 2094 rts 2095 2096/* 2097 * Invalidate user side of TLB 2098 */ 2099ENTRY(TBIAU) 2100#ifdef DEBUG 2101 tstl fulltflush | being conservative? 2102 jne __TBIA | yes, flush everything 2103#endif 2104#if defined(HP330) || defined(HP360) || defined(HP370) 2105 tstl _mmutype | HP MMU? 2106 jeq Lhpmmu8 | yes, skip 2107#if defined(HP360) || defined(HP370) 2108 jpl Lmc68851d | 68851? 2109 pflush #0,#4 | flush user TLB entries 2110 movl #DC_CLEAR,d0 2111 movc d0,cacr | invalidate on-chip d-cache 2112 rts 2113Lmc68851d: 2114#endif 2115 pflushs #0,#4 | flush user TLB entries 2116 rts 2117Lhpmmu8: 2118#endif 2119#if defined(HP320) || defined(HP350) 2120 MMUADDR(a0) 2121 moveq #0,d0 | more 2122 movl d0,a0@(MMUTBINVAL) | HP magic 2123#ifdef DEBUG 2124 tstl fullcflush 2125 jne __DCIU | XXX: invalidate entire user cache 2126#endif 2127#endif 2128 rts 2129 2130/* 2131 * Invalidate instruction cache 2132 */ 2133ENTRY(ICIA) 2134 movl #IC_CLEAR,d0 2135 movc d0,cacr | invalidate i-cache 2136 rts 2137 2138/* 2139 * Invalidate data cache. 2140 * HP external cache allows for invalidation of user/supervisor portions. 2141 * NOTE: we do not flush 68030 on-chip cache as there are no aliasing 2142 * problems with DC_WA. The only cases we have to worry about are context 2143 * switch and TLB changes, both of which are handled "in-line" in resume 2144 * and TBI*. 2145 */ 2146ENTRY(DCIA) 2147__DCIA: 2148#if defined(HP320) || defined(HP350) 2149 tstl _ectype | got external VAC? 2150 jle Lnocache2 | no, all done 2151 MMUADDR(a0) 2152 andl #~MMU_CEN,a0@(MMUCMD) | disable cache in MMU control reg 2153 orl #MMU_CEN,a0@(MMUCMD) | reenable cache in MMU control reg 2154Lnocache2: 2155#endif 2156 rts 2157 2158ENTRY(DCIS) 2159__DCIS: 2160#if defined(HP320) || defined(HP350) 2161 tstl _ectype | got external VAC? 2162 jle Lnocache3 | no, all done 2163 MMUADDR(a0) 2164 movl a0@(MMUSSTP),d0 | read the supervisor STP 2165 movl d0,a0@(MMUSSTP) | write it back 2166Lnocache3: 2167#endif 2168 rts 2169 2170ENTRY(DCIU) 2171__DCIU: 2172#if defined(HP320) || defined(HP350) 2173 tstl _ectype | got external VAC? 2174 jle Lnocache4 | no, all done 2175 MMUADDR(a0) 2176 movl a0@(MMUUSTP),d0 | read the user STP 2177 movl d0,a0@(MMUUSTP) | write it back 2178Lnocache4: 2179#endif 2180 rts 2181 2182ENTRY(PCIA) 2183#if defined(HP360) || defined(HP370) 2184 movl #DC_CLEAR,d0 2185 movc d0,cacr | invalidate on-chip d-cache 2186 tstl _ectype | got external PAC? 2187 jge Lnocache6 | no, all done 2188 MMUADDR(a0) 2189 andl #~MMU_CEN,a0@(MMUCMD) | disable cache in MMU control reg 2190 orl #MMU_CEN,a0@(MMUCMD) | reenable cache in MMU control reg 2191Lnocache6: 2192#endif 2193 rts 2194 2195ENTRY(ecacheon) 2196 tstl _ectype 2197 jeq Lnocache7 2198 MMUADDR(a0) 2199 orl #MMU_CEN,a0@(MMUCMD) 2200Lnocache7: 2201 rts 2202 2203ENTRY(ecacheoff) 2204 tstl _ectype 2205 jeq Lnocache8 2206 MMUADDR(a0) 2207 andl #~MMU_CEN,a0@(MMUCMD) 2208Lnocache8: 2209 rts 2210 2211/* 2212 * Get callers current SP value. 2213 * Note that simply taking the address of a local variable in a C function 2214 * doesn't work because callee saved registers may be outside the stack frame 2215 * defined by A6 (e.g. GCC generated code). 2216 */ 2217 .globl _getsp 2218_getsp: 2219 movl sp,d0 | get current SP 2220 addql #4,d0 | compensate for return address 2221 rts 2222 2223 .globl _getsfc, _getdfc 2224_getsfc: 2225 movc sfc,d0 2226 rts 2227_getdfc: 2228 movc dfc,d0 2229 rts 2230 2231/* 2232 * Load a new user segment table pointer. 2233 */ 2234ENTRY(loadustp) 2235#if defined(HP330) || defined(HP360) || defined(HP370) 2236 tstl _mmutype | HP MMU? 2237 jeq Lhpmmu9 | yes, skip 2238 movl sp@(4),d0 | new USTP 2239 moveq #PGSHIFT,d1 2240 lsll d1,d0 | convert to addr 2241 lea _protorp,a0 | CRP prototype 2242 movl d0,a0@(4) | stash USTP 2243 pmove a0@,crp | load root pointer 2244 movl #DC_CLEAR,d0 2245 movc d0,cacr | invalidate on-chip d-cache 2246 rts | since pmove flushes TLB 2247Lhpmmu9: 2248#endif 2249#if defined(HP320) || defined(HP350) 2250 MMUADDR(a0) 2251 movl sp@(4),a0@(MMUUSTP) | load a new USTP 2252#endif 2253 rts 2254 2255/* 2256 * Flush any hardware context associated with given USTP. 2257 * Only does something for HP330 where we must flush RPT 2258 * and ATC entries in PMMU. 2259 */ 2260ENTRY(flushustp) 2261#if defined(HP330) 2262 tstl _mmutype | 68851 PMMU? 2263 jle Lnot68851 | no, nothing to do 2264 movl sp@(4),d0 | get USTP to flush 2265 moveq #PGSHIFT,d1 2266 lsll d1,d0 | convert to address 2267 movl d0,_protorp+4 | stash USTP 2268 pflushr _protorp | flush RPT/TLB entries 2269Lnot68851: 2270#endif 2271 rts 2272 2273ENTRY(ploadw) 2274#if defined(HP330) || defined(HP360) || defined(HP370) 2275 movl sp@(4),a0 | address to load 2276 ploadw #1,a0@ | pre-load translation 2277#endif 2278 rts 2279 2280/* 2281 * Set processor priority level calls. Most are implemented with 2282 * inline asm expansions. However, spl0 requires special handling 2283 * as we need to check for our emulated software interrupts. 2284 */ 2285 2286ENTRY(spl0) 2287 moveq #0,d0 2288 movw sr,d0 | get old SR for return 2289 movw #PSL_LOWIPL,sr | restore new SR 2290 tstb _ssir | software interrupt pending? 2291 jeq Lspldone | no, all done 2292 subql #4,sp | make room for RTE frame 2293 movl sp@(4),sp@(2) | position return address 2294 clrw sp@(6) | set frame type 0 2295 movw #PSL_LOWIPL,sp@ | and new SR 2296 jra Lgotsir | go handle it 2297Lspldone: 2298 rts 2299 2300ENTRY(_insque) 2301 movw sr,d0 2302 movw #PSL_HIGHIPL,sr | atomic 2303 movl sp@(8),a0 | where to insert (after) 2304 movl sp@(4),a1 | element to insert (e) 2305 movl a0@,a1@ | e->next = after->next 2306 movl a0,a1@(4) | e->prev = after 2307 movl a1,a0@ | after->next = e 2308 movl a1@,a0 2309 movl a1,a0@(4) | e->next->prev = e 2310 movw d0,sr 2311 rts 2312 2313ENTRY(_remque) 2314 movw sr,d0 2315 movw #PSL_HIGHIPL,sr | atomic 2316 movl sp@(4),a0 | element to remove (e) 2317 movl a0@,a1 2318 movl a0@(4),a0 2319 movl a0,a1@(4) | e->next->prev = e->prev 2320 movl a1,a0@ | e->prev->next = e->next 2321 movw d0,sr 2322 rts 2323 2324/* 2325 * bzero(addr, count) 2326 */ 2327ALTENTRY(blkclr, _bzero) 2328ENTRY(bzero) 2329 movl sp@(4),a0 | address 2330 movl sp@(8),d0 | count 2331 jeq Lbzdone | if zero, nothing to do 2332 movl a0,d1 2333 btst #0,d1 | address odd? 2334 jeq Lbzeven | no, can copy words 2335 clrb a0@+ | yes, zero byte to get to even boundary 2336 subql #1,d0 | decrement count 2337 jeq Lbzdone | none left, all done 2338Lbzeven: 2339 movl d0,d1 2340 andl #31,d0 2341 lsrl #5,d1 | convert count to 8*longword count 2342 jeq Lbzbyte | no such blocks, zero byte at a time 2343Lbzloop: 2344 clrl a0@+; clrl a0@+; clrl a0@+; clrl a0@+; 2345 clrl a0@+; clrl a0@+; clrl a0@+; clrl a0@+; 2346 subql #1,d1 | one more block zeroed 2347 jne Lbzloop | more to go, do it 2348 tstl d0 | partial block left? 2349 jeq Lbzdone | no, all done 2350Lbzbyte: 2351 clrb a0@+ 2352 subql #1,d0 | one more byte cleared 2353 jne Lbzbyte | more to go, do it 2354Lbzdone: 2355 rts 2356 2357/* 2358 * strlen(str) 2359 */ 2360ENTRY(strlen) 2361 moveq #-1,d0 2362 movl sp@(4),a0 | string 2363Lslloop: 2364 addql #1,d0 | increment count 2365 tstb a0@+ | null? 2366 jne Lslloop | no, keep going 2367 rts 2368 2369/* 2370 * bcmp(s1, s2, len) 2371 * 2372 * WARNING! This guy only works with counts up to 64K 2373 */ 2374ENTRY(bcmp) 2375 movl sp@(4),a0 | string 1 2376 movl sp@(8),a1 | string 2 2377 moveq #0,d0 2378 movw sp@(14),d0 | length 2379 jeq Lcmpdone | if zero, nothing to do 2380 subqw #1,d0 | set up for DBcc loop 2381Lcmploop: 2382 cmpmb a0@+,a1@+ | equal? 2383 dbne d0,Lcmploop | yes, keep going 2384 addqw #1,d0 | +1 gives zero on match 2385Lcmpdone: 2386 rts 2387 2388/* 2389 * {ov}bcopy(from, to, len) 2390 * 2391 * Works for counts up to 128K. 2392 */ 2393ALTENTRY(ovbcopy, _bcopy) 2394ENTRY(bcopy) 2395 movl sp@(12),d0 | get count 2396 jeq Lcpyexit | if zero, return 2397 movl sp@(4),a0 | src address 2398 movl sp@(8),a1 | dest address 2399 cmpl a1,a0 | src before dest? 2400 jlt Lcpyback | yes, copy backwards (avoids overlap) 2401 movl a0,d1 2402 btst #0,d1 | src address odd? 2403 jeq Lcfeven | no, go check dest 2404 movb a0@+,a1@+ | yes, copy a byte 2405 subql #1,d0 | update count 2406 jeq Lcpyexit | exit if done 2407Lcfeven: 2408 movl a1,d1 2409 btst #0,d1 | dest address odd? 2410 jne Lcfbyte | yes, must copy by bytes 2411 movl d0,d1 | no, get count 2412 lsrl #2,d1 | convert to longwords 2413 jeq Lcfbyte | no longwords, copy bytes 2414 subql #1,d1 | set up for dbf 2415Lcflloop: 2416 movl a0@+,a1@+ | copy longwords 2417 dbf d1,Lcflloop | til done 2418 andl #3,d0 | get remaining count 2419 jeq Lcpyexit | done if none 2420Lcfbyte: 2421 subql #1,d0 | set up for dbf 2422Lcfbloop: 2423 movb a0@+,a1@+ | copy bytes 2424 dbf d0,Lcfbloop | til done 2425Lcpyexit: 2426 rts 2427Lcpyback: 2428 addl d0,a0 | add count to src 2429 addl d0,a1 | add count to dest 2430 movl a0,d1 2431 btst #0,d1 | src address odd? 2432 jeq Lcbeven | no, go check dest 2433 movb a0@-,a1@- | yes, copy a byte 2434 subql #1,d0 | update count 2435 jeq Lcpyexit | exit if done 2436Lcbeven: 2437 movl a1,d1 2438 btst #0,d1 | dest address odd? 2439 jne Lcbbyte | yes, must copy by bytes 2440 movl d0,d1 | no, get count 2441 lsrl #2,d1 | convert to longwords 2442 jeq Lcbbyte | no longwords, copy bytes 2443 subql #1,d1 | set up for dbf 2444Lcblloop: 2445 movl a0@-,a1@- | copy longwords 2446 dbf d1,Lcblloop | til done 2447 andl #3,d0 | get remaining count 2448 jeq Lcpyexit | done if none 2449Lcbbyte: 2450 subql #1,d0 | set up for dbf 2451Lcbbloop: 2452 movb a0@-,a1@- | copy bytes 2453 dbf d0,Lcbbloop | til done 2454 rts 2455 2456/* 2457 * Emulate fancy VAX string operations: 2458 * scanc(count, startc, table, mask) 2459 * skpc(mask, count, startc) 2460 * locc(mask, count, startc) 2461 */ 2462ENTRY(scanc) 2463 movl sp@(4),d0 | get length 2464 jeq Lscdone | nothing to do, return 2465 movl sp@(8),a0 | start of scan 2466 movl sp@(12),a1 | table to compare with 2467 movb sp@(19),d1 | and mask to use 2468 movw d2,sp@- | need a scratch register 2469 clrw d2 | clear it out 2470 subqw #1,d0 | adjust for dbra 2471Lscloop: 2472 movb a0@+,d2 | get character 2473 movb a1@(0,d2:w),d2 | get table entry 2474 andb d1,d2 | mask it 2475 dbne d0,Lscloop | keep going til no more or non-zero 2476 addqw #1,d0 | overshot by one 2477 movw sp@+,d2 | restore scratch 2478Lscdone: 2479 rts 2480 2481ENTRY(skpc) 2482 movl sp@(8),d0 | get length 2483 jeq Lskdone | nothing to do, return 2484 movb sp@(7),d1 | mask to use 2485 movl sp@(12),a0 | where to start 2486 subqw #1,d0 | adjust for dbcc 2487Lskloop: 2488 cmpb a0@+,d1 | compate with mask 2489 dbne d0,Lskloop | keep going til no more or zero 2490 addqw #1,d0 | overshot by one 2491Lskdone: 2492 rts 2493 2494ENTRY(locc) 2495 movl sp@(8),d0 | get length 2496 jeq Llcdone | nothing to do, return 2497 movb sp@(7),d1 | mask to use 2498 movl sp@(12),a0 | where to start 2499 subqw #1,d0 | adjust for dbcc 2500Llcloop: 2501 cmpb a0@+,d1 | compate with mask 2502 dbeq d0,Llcloop | keep going til no more or non-zero 2503 addqw #1,d0 | overshot by one 2504Llcdone: 2505 rts 2506 2507/* 2508 * Emulate VAX FFS (find first set) instruction. 2509 */ 2510ENTRY(ffs) 2511 moveq #-1,d0 2512 movl sp@(4),d1 2513 jeq Lffsdone 2514Lffsloop: 2515 addql #1,d0 2516 btst d0,d1 2517 jeq Lffsloop 2518Lffsdone: 2519 addql #1,d0 2520 rts 2521 2522#ifdef FPCOPROC 2523/* 2524 * Save and restore 68881 state. 2525 * Pretty awful looking since our assembler does not 2526 * recognize FP mnemonics. 2527 */ 2528ENTRY(m68881_save) 2529 movl sp@(4),a0 | save area pointer 2530 fsave a0@ | save state 2531 tstb a0@ | null state frame? 2532 jeq Lm68881sdone | yes, all done 2533 fmovem fp0-fp7,a0@(216) | save FP general registers 2534 fmovem fpcr/fpsr/fpi,a0@(312) | save FP control registers 2535Lm68881sdone: 2536 rts 2537 2538ENTRY(m68881_restore) 2539 movl sp@(4),a0 | save area pointer 2540 tstb a0@ | null state frame? 2541 jeq Lm68881rdone | yes, easy 2542 fmovem a0@(312),fpcr/fpsr/fpi | restore FP control registers 2543 fmovem a0@(216),fp0-fp7 | restore FP general registers 2544Lm68881rdone: 2545 frestore a0@ | restore state 2546 rts 2547#endif 2548 2549/* 2550 * Handle the nitty-gritty of rebooting the machine. 2551 * Basically we just turn off the MMU and jump to the appropriate ROM routine. 2552 * Note that we must be running in an address range that is mapped one-to-one 2553 * logical to physical so that the PC is still valid immediately after the MMU 2554 * is turned off. We have conveniently mapped the last page of physical 2555 * memory this way. 2556 */ 2557 .globl _doboot 2558_doboot: 2559 movl #CACHE_OFF,d0 2560 movc d0,cacr | disable on-chip cache(s) 2561#if defined(HP320) || defined(HP350) || defined(HP370) 2562 tstl _ectype 2563 jeq Lnocache5 2564 MMUADDR(a0) 2565 andl #~MMU_CEN,a0@(MMUCMD) | disable external cache 2566Lnocache5: 2567#endif 2568 lea MAXADDR,a0 | last page of physical memory 2569 movl _boothowto,a0@+ | store howto 2570 movl _bootdev,a0@+ | and devtype 2571 lea Lbootcode,a1 | start of boot code 2572 lea Lebootcode,a3 | end of boot code 2573Lbootcopy: 2574 movw a1@+,a0@+ | copy a word 2575 cmpl a3,a1 | done yet? 2576 jcs Lbootcopy | no, keep going 2577 jmp MAXADDR+8 | jump to last page 2578 2579Lbootcode: 2580 lea MAXADDR+0x800,sp | physical SP in case of NMI 2581#if defined(HP330) || defined(HP360) || defined(HP370) 2582 tstl _mmutype | HP MMU? 2583 jeq LhpmmuB | yes, skip 2584 movl #0,a0@ | value for pmove to TC (turn off MMU) 2585 pmove a0@,tc | disable MMU 2586 jmp 0x1A4 | goto REQ_REBOOT 2587LhpmmuB: 2588#endif 2589#if defined(HP320) || defined(HP350) 2590 MMUADDR(a0) 2591 movl #0xFFFF0000,a0@(MMUCMD) | totally disable MMU 2592 movl d2,MAXADDR+NBPG-4 | restore old high page contents 2593 jmp 0x1A4 | goto REQ_REBOOT 2594#endif 2595Lebootcode: 2596 2597 .data 2598 .space NBPG 2599tmpstk: 2600 .globl _machineid 2601_machineid: 2602 .long 0 | default to 320 2603 .globl _mmutype,_protorp 2604_mmutype: 2605 .long 0 | default to HP MMU 2606_protorp: 2607 .long 0,0 | prototype root pointer 2608 .globl _ectype 2609_ectype: 2610 .long 0 | external cache type, default to none 2611 .globl _internalhpib 2612_internalhpib: 2613 .long 1 | has internal HP-IB, default to yes 2614 .globl _cold 2615_cold: 2616 .long 1 | cold start flag 2617 .globl _intiobase, _intiolimit, _extiobase, _CLKbase, _MMUbase 2618 .globl _proc0paddr 2619_proc0paddr: 2620 .long 0 | KVA of proc0 u-area 2621_intiobase: 2622 .long 0 | KVA of base of internal IO space 2623_intiolimit: 2624 .long 0 | KVA of end of internal IO space 2625_extiobase: 2626 .long 0 | KVA of base of external IO space 2627_CLKbase: 2628 .long 0 | KVA of base of clock registers 2629_MMUbase: 2630 .long 0 | KVA of base of HP MMU registers 2631#ifdef DEBUG 2632 .globl fulltflush, fullcflush 2633fulltflush: 2634 .long 0 2635fullcflush: 2636 .long 0 2637 .globl timebomb 2638timebomb: 2639 .long 0 2640#endif 2641/* interrupt counters */ 2642 .globl _intrcnt,_eintrcnt,_intrnames,_eintrnames 2643_intrnames: 2644 .asciz "spur" 2645 .asciz "hil" 2646 .asciz "lev2" 2647 .asciz "lev3" 2648 .asciz "lev4" 2649 .asciz "lev5" 2650 .asciz "dma" 2651 .asciz "clock" 2652#ifdef PROFTIMER 2653 .asciz "pclock" 2654#endif 2655 .asciz "nmi" 2656_eintrnames: 2657 .even 2658_intrcnt: 2659#ifdef PROFTIMER 2660 .long 0,0,0,0,0,0,0,0,0,0 2661#else 2662 .long 0,0,0,0,0,0,0,0,0 2663#endif 2664_eintrcnt: 2665