/*	$NetBSD: cpuswitch.S,v 1.28 2002/10/19 00:10:53 bjh21 Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 */

#include "opt_armfpe.h"
#include "opt_multiprocessor.h"

#include "assym.h"
#include <machine/param.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/asm.h>

#undef IRQdisable
#undef IRQenable

/*
 * New experimental definitions of IRQdisable and IRQenable
 * These keep FIQ's enabled since FIQ's are special.
 *
 * Only the I (IRQ-disable) bit of the CPSR is set/cleared; the F bit is
 * left untouched.  Note that both macros use r14 as scratch, so the
 * current lr value is clobbered.
 */

#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14 ; \

#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14 ; \

	.text

/*
 * Literal pool: addresses of kernel globals, loaded PC-relative by the
 * routines below (ldr rN, .Lfoo).
 */
.Lwhichqs:
	.word	_C_LABEL(sched_whichqs)

.Lqs:
	.word	_C_LABEL(sched_qs)

/*
 * cpuswitch()
 *
 * performs a process context switch.
 * This function has several entry points
 */

#ifdef MULTIPROCESSOR
.Lcpu_info_store:
	.word	_C_LABEL(cpu_info_store)
.Lcurproc:
	/* FIXME: This is bogus in the general case. */
	.word	_C_LABEL(cpu_info_store) + CI_CURPROC

.Lcurpcb:
	.word	_C_LABEL(cpu_info_store) + CI_CURPCB
#else
.Lcurproc:
	.word	_C_LABEL(curproc)

.Lcurpcb:
	.word	_C_LABEL(curpcb)
#endif

.Lwant_resched:
	.word	_C_LABEL(want_resched)

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

#ifndef MULTIPROCESSOR
	/* Single-processor: the curpcb variable itself is defined here. */
	.data
	.global	_C_LABEL(curpcb)
_C_LABEL(curpcb):
	.word	0x00000000
	.text
#endif

.Lblock_userspace_access:
	.word	_C_LABEL(block_userspace_access)

.Lcpu_do_powersave:
	.word	_C_LABEL(cpu_do_powersave)

/*
 * Idle loop, exercised while waiting for a process to wake up.
 *
 * NOTE: When we jump back to .Lswitch_search, we must have a
 * pointer to whichqs in r7, which is what it is when we arrive
 * here.
 */
/* LINTSTUB: Ignore */
ASENTRY_NP(idle)
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	bl	_C_LABEL(sched_unlock_idle)
#endif
	ldr	r3, .Lcpu_do_powersave

	/* Enable interrupts */
	IRQenable

	/* If we don't want to sleep, use a simpler loop. */
	ldr	r3, [r3]		/* r3 = cpu_do_powersave */
	teq	r3, #0
	bne	2f

	/* Non-powersave idle: spin polling whichqs until a queue goes live. */
1:	/* should maybe do uvm pageidlezero stuff here */
	ldr	r3, [r7]		/* r3 = whichqs */
	teq	r3, #0x00000000
	bne	.Lswitch_search
	b	1b

2:	/* Powersave idle: let the CPU sleep between polls of whichqs. */
	ldr	r4, .Lcpufuncs
3:	ldr	r3, [r7]		/* r3 = whichqs */
	teq	r3, #0x00000000
	bne	.Lswitch_search

	/* if saving power, don't want to pageidlezero */
	mov	r0, #0
	adr	lr, 3b			/* return from the sleep hook to 3b */
	ldr	pc, [r4, #(CF_SLEEP)]	/* tail-call cpufuncs.cf_sleep(0) */
	/* loops back around */


/*
 * Find a new process to run, save the current context and
 * load the new context
 */

ENTRY(cpu_switch)
/*
 * Local register usage. Some of these registers are out of date.
 * r1 = oldproc
 * r3 = whichqs
 * r4 = queue
 * r5 = &qs[queue]
 * r6 = newproc
 * r7 = scratch
 */
	stmfd	sp!, {r4-r7, lr}

	/*
	 * Get the current process and indicate that there is no longer
	 * a valid process (curproc = 0). Zero the current PCB pointer
	 * while we're at it.
	 */
	ldr	r7, .Lcurproc
	ldr	r6, .Lcurpcb
	mov	r0, #0x00000000
	ldr	r1, [r7]		/* r1 = curproc */
	str	r0, [r7]		/* curproc = NULL */
	str	r0, [r6]		/* curpcb = NULL */

	/* stash the old proc while we call functions */
	mov	r5, r1

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	/* release the sched_lock before handling interrupts */
	bl	_C_LABEL(sched_unlock_idle)
#endif

	/* Lower the spl level to spl0 and get the current spl level. */
#ifdef __NEWINTR
	mov	r0, #(IPL_NONE)
	bl	_C_LABEL(_spllower)
#else	/* ! __NEWINTR */
#ifdef spl0
	mov	r0, #(_SPL_0)
	bl	_C_LABEL(splx)
#else
	bl	_C_LABEL(spl0)
#endif	/* spl0 */
#endif	/* __NEWINTR */

	/* Push the old spl level onto the stack (popped at .Lswitch_return) */
	str	r0, [sp, #-0x0004]!

	/* First phase : find a new process */

	ldr	r7, .Lwhichqs

	/* rem: r5 = old proc */
	/* rem: r7 = &whichqs */

.Lswitch_search:
	IRQdisable
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	bl	_C_LABEL(sched_lock_idle)
#endif

	/* Do we have any active queues? */
	ldr	r3, [r7]

	/* If not we must idle until we do. */
	teq	r3, #0x00000000
	beq	_ASM_LABEL(idle)

	/* put old proc back in r1 */
	mov	r1, r5

	/* rem: r1 = old proc */
	/* rem: r3 = whichqs */
	/* rem: interrupts are disabled */

	/*
	 * We have found an active queue. Currently we do not know which queue
	 * is active just that one of them is.
	 */
	/* this is the ffs algorithm devised by d.seal and posted to
	 * comp.sys.arm on 16 Feb 1994.
	 */
	rsb	r5, r3, #0		/* r5 = -r3 */
	ands	r0, r3, r5		/* r0 = isolated lowest set bit of r3 */

	adr	r5, .Lcpu_switch_ffs_table

	/* X = R0 */
	orr	r4, r0, r0, lsl #4	/* r4 = X * 0x11 */
	orr	r4, r4, r4, lsl #6	/* r4 = X * 0x451 */
	rsb	r4, r4, r4, lsl #16	/* r4 = X * 0x0450fbaf */

	/* used further down, saves SA stall */
	ldr	r6, .Lqs

	/* now lookup in table indexed on top 6 bits of a4 */
	ldrb	r4, [ r5, r4, lsr #26 ]

	/* rem: r0 = bit mask of chosen queue (1 << r4) */
	/* rem: r1 = old proc */
	/* rem: r3 = whichqs */
	/* rem: r4 = queue number */
	/* rem: interrupts are disabled */

	/* Get the address of the queue (&qs[queue]); each entry is 8 bytes */
	add	r5, r6, r4, lsl #3

	/*
	 * Get the process from the queue and place the next process in
	 * the queue at the head. This basically unlinks the process at
	 * the head of the queue.
	 */
	ldr	r6, [r5, #(P_FORW)]

	/* rem: r6 = new process */
	ldr	r7, [r6, #(P_FORW)]
	str	r7, [r5, #(P_FORW)]

	/*
	 * Test to see if the queue is now empty. If the head of the queue
	 * points to the queue itself then there are no more processes in
	 * the queue. We can therefore clear the queue not empty flag held
	 * in r3.
	 */

	teq	r5, r7
	biceq	r3, r3, r0

	/* rem: r0 = bit mask of chosen queue (1 << r4) - NOT NEEDED ANY MORE */

	/* Fix the back pointer for the process now at the head of the queue. */
	ldr	r0, [r6, #(P_BACK)]
	str	r0, [r7, #(P_BACK)]

	/* Update the RAM copy of the queue not empty flags word. */
	ldr	r7, .Lwhichqs
	str	r3, [r7]

	/* rem: r1 = old proc */
	/* rem: r3 = whichqs - NOT NEEDED ANY MORE */
	/* rem: r4 = queue number - NOT NEEDED ANY MORE */
	/* rem: r6 = new process */
	/* rem: interrupts are disabled */

	/* Clear the want_resched flag */
	ldr	r7, .Lwant_resched
	mov	r0, #0x00000000
	str	r0, [r7]

	/*
	 * Clear the back pointer of the process we have removed from
	 * the head of the queue. The new process is isolated now.
	 */
	str	r0, [r6, #(P_BACK)]

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	/*
	 * unlock the sched_lock, but leave interrupts off, for now.
	 * (r1 is caller-volatile across the call, so park it in r7.)
	 */
	mov	r7, r1
	bl	_C_LABEL(sched_unlock_idle)
	mov	r1, r7
#endif

#ifdef MULTIPROCESSOR
	/* XXX use curcpu() */
	ldr	r0, .Lcpu_info_store
	str	r0, [r6, #(P_CPU)]
#else
	/* p->p_cpu initialized in fork1() for single-processor */
#endif

	/* Process is now on a processor. */
	mov	r0, #SONPROC		/* p->p_stat = SONPROC */
	strb	r0, [r6, #(P_STAT)]

	/* We have a new curproc now so make a note of it */
	ldr	r7, .Lcurproc
	str	r6, [r7]

	/* Hook in a new pcb */
	ldr	r7, .Lcurpcb
	ldr	r0, [r6, #(P_ADDR)]
	str	r0, [r7]

	/* At this point we can allow IRQ's again. */
	IRQenable

	/* rem: r1 = old proc */
	/* rem: r6 = new process */
	/* rem: interrupts are enabled */

	/*
	 * If the new process is the same as the process that called
	 * cpu_switch() then we do not need to save and restore any
	 * contexts. This means we can make a quick exit.
	 * The test is simple if curproc on entry (now in r1) is the
	 * same as the proc removed from the queue we can jump to the exit.
	 */
	teq	r1, r6
	beq	.Lswitch_return

	/* Remember the old process in r0 */
	mov	r0, r1

	/*
	 * If the curproc on entry to cpu_switch was zero then the
	 * process that called it was exiting. This means that we do
	 * not need to save the current context. Instead we can jump
	 * straight to restoring the context for the new process.
	 */
	teq	r0, #0x00000000
	beq	.Lswitch_exited

	/* rem: r0 = old proc */
	/* rem: r6 = new process */
	/* rem: interrupts are enabled */

	/* Stage two : Save old context */

	/* Get the user structure for the old process. */
	ldr	r1, [r0, #(P_ADDR)]

	/* Save all the registers in the old process's pcb */
	add	r7, r1, #(PCB_R8)
	stmia	r7, {r8-r13}

	/*
	 * This can be optimised... We know we want to go from SVC32
	 * mode to UND32 mode to get at the undefined-mode sp.
	 */
	mrs	r3, cpsr
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE | I32_bit)
	msr	cpsr_c, r2

	str	sp, [r1, #(PCB_UND_SP)]

	msr	cpsr_c, r3		/* Restore the old mode */

	/* rem: r0 = old proc */
	/* rem: r1 = old pcb */
	/* rem: r6 = new process */
	/* rem: interrupts are enabled */

	/* What else needs to be saved?  Only FPA stuff when that is supported */

	/* r1 now free! */

	/* Third phase : restore saved context */

	/* rem: r0 = old proc */
	/* rem: r6 = new process */
	/* rem: interrupts are enabled */

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	ldr	r3, .Lblock_userspace_access
	mov	r1, #0x00000001
	mov	r2, #0x00000000
	str	r1, [r3]

	/* Purge the cache; r0-r3 preserved around the indirect call. */
	stmfd	sp!, {r0-r3}
	ldr	r1, .Lcpufuncs
	mov	lr, pc			/* lr = return address (pc reads as .+8) */
	ldr	pc, [r1, #CF_IDCACHE_WBINV_ALL]
	ldmfd	sp!, {r0-r3}

.Lcs_cache_purge_skipped:
	/* At this point we need to kill IRQ's again. */
	IRQdisable

	/*
	 * Interrupts are disabled so we can allow user space accesses again
	 * as none will occur until interrupts are re-enabled after the
	 * switch.  (r2 = 0, r3 = &block_userspace_access here.)
	 */
	str	r2, [r3]

	/* Get the user structure for the new process in r1 */
	ldr	r1, [r6, #(P_ADDR)]

	/* Get the pagedir physical address for the process. */
	ldr	r0, [r1, #(PCB_PAGEDIR)]

	/* Switch the memory to the new process */
	ldr	r3, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r3, #CF_CONTEXT_SWITCH]

	/*
	 * This can be optimised... We know we want to go from SVC32
	 * mode to UND32 mode to reload the undefined-mode sp.
	 */
	mrs	r3, cpsr
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE)
	msr	cpsr_c, r2

	ldr	sp, [r1, #(PCB_UND_SP)]

	msr	cpsr_c, r3		/* Restore the old mode */

	/* Restore all the saved registers */
	add	r7, r1, #PCB_R8
	ldmia	r7, {r8-r13}

	mov	r7, r1			/* preserve PCB pointer */

#ifdef ARMFPE
	/* r0 = pcb + USER_SIZE, built in two adds (immediate-encoding limit) */
	add	r0, r1, #(USER_SIZE) & 0x00ff
	add	r0, r0, #(USER_SIZE) & 0xff00
	bl	_C_LABEL(arm_fpe_core_changecontext)
#endif

	/* We can enable interrupts again */
	IRQenable

	/* rem: r6 = new proc */
	/* rem: r7 = new PCB */

	/*
	 * Check for restartable atomic sequences (RAS).
	 */

	ldr	r2, [r6, #(P_NRAS)]
	ldr	r4, [r7, #(PCB_TF)]	/* r4 = trapframe (used below) */
	teq	r2, #0			/* p->p_nras == 0? */
	bne	.Lswitch_do_ras		/* no, check for one */

.Lswitch_return:

	/* Get the spl level from the stack and update the current spl level */
	ldr	r0, [sp], #0x0004
	bl	_C_LABEL(splx)

	/* cpu_switch returns the proc it switched to. */
	mov	r0, r6

	/*
	 * Pull the registers that got pushed when either savectx() or
	 * cpu_switch() was called and return.
	 */
	ldmfd	sp!, {r4-r7, pc}

.Lswitch_do_ras:
	ldr	r1, [r4, #(TF_PC)]	/* second ras_lookup() arg */
	mov	r0, r6			/* first ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	strne	r0, [r4, #(TF_PC)]	/* otherwise restart at the RAS start */
	b	.Lswitch_return

.Lswitch_exited:
	/*
	 * We skip the cache purge because switch_exit() already did
	 * it.  Load up registers the way .Lcs_cache_purge_skipped
	 * expects.  Userspace access already blocked in switch_exit().
	 */
	ldr	r3, .Lblock_userspace_access
	mov	r2, #0x00000000
	b	.Lcs_cache_purge_skipped

/*
 * void switch_exit(struct proc *p, struct proc *p0);
 * Switch to proc0's saved context and deallocate the address space and kernel
 * stack for p.  Then jump into cpu_switch(), as if we were in proc0 all along.
 */

/* LINTSTUB: Func: void switch_exit(struct proc *p, struct proc *p0) */
ENTRY(switch_exit)
	/*
	 * r0 = proc
	 * r1 = proc0
	 */

	mov	r3, r0			/* keep the exiting proc in r3 */

	/* In case we fault */
	ldr	r0, .Lcurproc
	mov	r2, #0x00000000
	str	r2, [r0]		/* curproc = NULL */

/*	ldr	r0, .Lcurpcb
	str	r2, [r0]*/

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	ldr	r0, .Lblock_userspace_access
	mov	r2, #0x00000001
	str	r2, [r0]

	/* Switch to proc0 context */

	stmfd	sp!, {r0-r3}		/* preserve r0-r3 across the purge */

	ldr	r0, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]

	ldmfd	sp!, {r0-r3}

	IRQdisable

	/* r1 = proc0; fetch its pcb and page directory */
	ldr	r2, [r1, #(P_ADDR)]
	ldr	r0, [r2, #(PCB_PAGEDIR)]

	/* Switch the memory to the new process */
	ldr	r4, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r4, #CF_CONTEXT_SWITCH]

	/* Restore all the saved registers */
	add	r7, r2, #PCB_R8
	ldmia	r7, {r8-r13}

	/* This is not really needed ! */
	/* Yes it is for the su and fu routines */
	ldr	r0, .Lcurpcb
	str	r2, [r0]		/* curpcb = proc0's pcb */

	IRQenable

/*	str	r3, [sp, #-0x0004]!*/

	/*
	 * Schedule the vmspace and stack to be freed.
	 */
	mov	r0, r3			/* exit2(p) */
	bl	_C_LABEL(exit2)

	/* Paranoia */
	ldr	r1, .Lcurproc
	mov	r0, #0x00000000
	str	r0, [r1]

	/* Enter cpu_switch()'s search loop with no old proc to save. */
	ldr	r7, .Lwhichqs		/* r7 = &whichqs */
	mov	r5, #0x00000000		/* r5 = old proc = NULL */
	b	.Lswitch_search

/* LINTSTUB: Func: void savectx(struct pcb *pcb) */
ENTRY(savectx)
	/*
	 * r0 = pcb
	 */

	/* Push registers.*/
	stmfd	sp!, {r4-r7, lr}

	/* Store all the registers in the process's pcb */
	add	r2, r0, #(PCB_R8)
	stmia	r2, {r8-r13}

	/* Pull the regs off the stack */
	ldmfd	sp!, {r4-r7, pc}

/*
 * First code run in a new process's context; calls the routine in r4
 * with r5 as argument (presumably set up at fork time — TODO confirm
 * against cpu_fork()), then returns to user mode via PULLFRAME.
 */
ENTRY(proc_trampoline)
#ifdef MULTIPROCESSOR
	bl	_C_LABEL(proc_trampoline_mp)
#endif
	mov	r0, r5			/* first arg for the routine in r4 */
	mov	r1, sp			/* second arg: current stack pointer */
	mov	lr, pc			/* lr = return address (pc reads as .+8) */
	mov	pc, r4

	/* Kill irq's */
	mrs	r0, cpsr
	orr	r0, r0, #(I32_bit)
	msr	cpsr_c, r0

	PULLFRAME

	movs	pc, lr			/* Exit */

	.type .Lcpu_switch_ffs_table, _ASM_TYPE_OBJECT;
.Lcpu_switch_ffs_table:
/* same as ffs table but all nums are -1 from that */
/*               0   1   2   3   4   5   6   7           */
	.byte	 0,  0,  1, 12,  2,  6,  0, 13	/*  0- 7 */
	.byte	 3,  0,  7,  0,  0,  0,  0, 14	/*  8-15 */
	.byte	10,  4,  0,  0,  8,  0,  0, 25	/* 16-23 */
	.byte	 0,  0,  0,  0,  0, 21, 27, 15	/* 24-31 */
	.byte	31, 11,  5,  0,  0,  0,  0,  0	/* 32-39 */
	.byte	 9,  0,  0, 24,  0,  0, 20, 26	/* 40-47 */
	.byte	30,  0,  0,  0,  0, 23,  0, 19	/* 48-55 */
	.byte	29,  0, 22, 18, 28, 17, 16,  0	/* 56-63 */

/* End of cpuswitch.S */