1 /* $NetBSD: sh3_machdep.c,v 1.41 2002/05/10 15:25:13 uch Exp $ */ 2 3 /*- 4 * Copyright (c) 1996, 1997, 1998, 2002 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace 9 * Simulation Facility, NASA Ames Research Center. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by the NetBSD 22 * Foundation, Inc. and its contributors. 23 * 4. Neither the name of The NetBSD Foundation nor the names of its 24 * contributors may be used to endorse or promote products derived 25 * from this software without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 30 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40 /*- 41 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. 42 * All rights reserved. 43 * 44 * This code is derived from software contributed to Berkeley by 45 * William Jolitz. 46 * 47 * Redistribution and use in source and binary forms, with or without 48 * modification, are permitted provided that the following conditions 49 * are met: 50 * 1. Redistributions of source code must retain the above copyright 51 * notice, this list of conditions and the following disclaimer. 52 * 2. Redistributions in binary form must reproduce the above copyright 53 * notice, this list of conditions and the following disclaimer in the 54 * documentation and/or other materials provided with the distribution. 55 * 3. All advertising materials mentioning features or use of this software 56 * must display the following acknowledgement: 57 * This product includes software developed by the University of 58 * California, Berkeley and its contributors. 59 * 4. Neither the name of the University nor the names of its contributors 60 * may be used to endorse or promote products derived from this software 61 * without specific prior written permission. 62 * 63 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 64 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 65 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 66 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include "opt_kgdb.h"
#include "opt_memsize.h"
#include "opt_compat_netbsd.h"
#include "opt_kstack_debug.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/syscallargs.h>
#include <sys/user.h>

#ifdef KGDB
#include <sys/kgdb.h>
#ifndef KGDB_DEVNAME
#define KGDB_DEVNAME "nodev"
#endif
const char kgdb_devname[] = KGDB_DEVNAME;
#endif /* KGDB */

#include <uvm/uvm_extern.h>

#include <sh3/cache.h>
#include <sh3/clock.h>
#include <sh3/exception.h>
#include <sh3/locore.h>
#include <sh3/mmu.h>
#include <sh3/intr.h>

/* Our exported CPU info; we can have only one. */
struct cpu_info cpu_info_store;
int cpu_arch;			/* CPU architecture, recorded by sh_cpu_init() */
int cpu_product;		/* CPU product/model, recorded by sh_cpu_init() */
char cpu_model[120];		/* printable CPU name for the boot banner */

struct vm_map *exec_map;	/* submap for exec arguments (sh_startup) */
struct vm_map *mb_map;
struct vm_map *phys_map;	/* submap for physio (sh_startup) */

int physmem;			/* physical memory, in pages */
struct user *proc0paddr;	/* init_main.c use this. */
struct pcb *curpcb;		/* pcb of the currently running process */
struct md_upte *curupte;	/* SH3 wired u-area hack */

#if !defined(IOM_RAM_BEGIN)
#error "define IOM_RAM_BEGIN"
#elif (IOM_RAM_BEGIN & SH3_P1SEG_BASE) != 0
#error "IOM_RAM_BEGIN is physical address. not P1 address."
#endif

/* Exception vector base: start of RAM, viewed through the P1 segment. */
#define VBR	(u_int8_t *)SH3_PHYS_TO_P1SEG(IOM_RAM_BEGIN)
vaddr_t ram_start = SH3_PHYS_TO_P1SEG(IOM_RAM_BEGIN);
/* exception handler holder (sh3/sh3/exception_vector.S) */
extern char sh_vector_generic[], sh_vector_generic_end[];
extern char sh_vector_interrupt[], sh_vector_interrupt_end[];
#ifdef SH3
extern char sh3_vector_tlbmiss[], sh3_vector_tlbmiss_end[];
#endif
#ifdef SH4
extern char sh4_vector_tlbmiss[], sh4_vector_tlbmiss_end[];
#endif
/*
 * These variables are needed by /sbin/savecore
 */
u_int32_t dumpmag = 0x8fca0101;	/* magic number */
int dumpsize;			/* pages */
long dumplo;			/* blocks */

/*
 * void sh_cpu_init(int arch, int product):
 *	One-time CPU setup.  Records the CPU type, initializes the
 *	cache, MMU, clock and interrupt controller, copies the exception
 *	handlers to their architectural offsets from VBR, points VBR at
 *	them, selects the matching cpu_switch() resume routine, and sets
 *	the VM page size.
 */
void
sh_cpu_init(int arch, int product)
{
	/* CPU type */
	cpu_arch = arch;
	cpu_product = product;

#if defined(SH3) && defined(SH4)
	/* Set register addresses (needed only when one kernel serves both) */
	sh_devreg_init();
#endif
	/* Cache access ops. */
	sh_cache_init();

	/* MMU access ops. */
	sh_mmu_init();

	/* Hardclock, RTC initialize. */
	machine_clock_init();

	/* Interrupt controller (ICU) initialize. */
	intc_init();

	/*
	 * Exception vector.  The SH architecture dispatches to fixed
	 * offsets from VBR: +0x100 general exception, +0x400 TLB miss,
	 * +0x600 interrupt.
	 */
	memcpy(VBR + 0x100, sh_vector_generic,
	    sh_vector_generic_end - sh_vector_generic);
#ifdef SH3
	if (CPU_IS_SH3)
		memcpy(VBR + 0x400, sh3_vector_tlbmiss,
		    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss);
#endif
#ifdef SH4
	if (CPU_IS_SH4)
		memcpy(VBR + 0x400, sh4_vector_tlbmiss,
		    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss);
#endif
	memcpy(VBR + 0x600, sh_vector_interrupt,
	    sh_vector_interrupt_end - sh_vector_interrupt);

	/* Make sure the I-cache sees the handlers copied above. */
	if (!SH_HAS_UNIFIED_CACHE)
		sh_icache_sync_all();

	__asm__ __volatile__("ldc %0, vbr" :: "r"(VBR));

	/* kernel stack setup */
	__sh_switch_resume = CPU_IS_SH3 ? sh3_switch_resume : sh4_switch_resume;

	/* Set page size (4KB) */
	uvm_setpagesize();
}

/*
 * void sh_proc0_init(void):
 *	Setup proc0 u-area.
 */
void
sh_proc0_init()
{
	struct switchframe *sf;
	vaddr_t u;

	/* Steal process0 u-area */
	u = uvm_pageboot_alloc(USPACE);
	memset((void *)u, 0, USPACE);

	/* Setup proc0 */
	proc0paddr = (struct user *)u;
	proc0.p_addr = proc0paddr;
	/*
	 * u-area map:
	 *	|user| .... | ............... |
	 *	| NBPG |   USPACE - NBPG      |
	 *	       frame top         stack top
	 * current frame ... r6_bank
	 * stack top     ... r7_bank
	 * current stack ... r15
	 */
	curpcb = proc0.p_md.md_pcb = &proc0.p_addr->u_pcb;
	curupte = proc0.p_md.md_upte;

	sf = &curpcb->pcb_sf;
	sf->sf_r6_bank = u + NBPG;
	sf->sf_r7_bank = sf->sf_r15 = u + USPACE;
	/* Load the banked registers so traps find proc0's frame/stack. */
	__asm__ __volatile__("ldc %0, r6_bank" :: "r"(sf->sf_r6_bank));
	__asm__ __volatile__("ldc %0, r7_bank" :: "r"(sf->sf_r7_bank));

	/* The trapframe sits immediately below the frame top. */
	proc0.p_md.md_regs = (struct trapframe *)sf->sf_r6_bank - 1;
#ifdef KSTACK_DEBUG
	/* Fill with sentinel patterns so stack usage can be inspected. */
	memset((char *)(u + sizeof(struct user)), 0x5a,
	    NBPG - sizeof(struct user));
	memset((char *)(u + NBPG), 0xa5, USPACE - NBPG);
#endif /* KSTACK_DEBUG */
}

/*
 * void sh_startup(void):
 *	Machine-dependent startup: print the kernel banner and memory
 *	totals, allocate VM and physical pages for the buffer cache, and
 *	create the exec-argument and physio submaps.
 */
void
sh_startup()
{
	int i, base, residual;
	vaddr_t minaddr, maxaddr;
	vsize_t size;
	char pbuf[9];

	/* version is a trusted kernel string, not external input. */
	printf(version);
	if (*cpu_model != '\0')
		printf("%s", cpu_model);
#ifdef DEBUG
	printf("general exception handler:\t%d byte\n",
	    sh_vector_generic_end - sh_vector_generic);
	printf("TLB miss exception handler:\t%d byte\n",
#if defined(SH3) && defined(SH4)
	    CPU_IS_SH3 ?
	    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss :
	    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss
#elif defined(SH3)
	    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss
#elif defined(SH4)
	    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss
#endif
	    );
	printf("interrupt exception handler:\t%d byte\n",
	    sh_vector_interrupt_end - sh_vector_interrupt);
#endif /* DEBUG */

	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	buffers = 0;
	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
	    NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	    UVM_ADV_NORMAL, 0)) != 0)
		panic("sh3_startup: cannot allocate VM for buffers");
	minaddr = (vaddr_t)buffers;
	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
		/* don't want to alloc more physical mem than needed */
		bufpages = btoc(MAXBSIZE) * nbuf;
	}

	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vsize_t curbufsize;
		vaddr_t curbuf;
		struct vm_page *pg;

		/*
		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
		 * that MAXBSIZE space, we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
		curbufsize = NBPG * ((i < residual) ? (base+1) : base);

		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("sh3_startup: not enough memory for "
				    "buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ|VM_PROT_WRITE);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}
	pmap_update(pmap_kernel());

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
	printf("using %d buffers containing %s of memory\n", nbuf, pbuf);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
}

/*
 * This is called by main to set dumplo and dumpsize.
 * Dumps always skip the first CLBYTES of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 *
 * XXX: empty stub -- crash dump configuration is not implemented here.
 */
void
cpu_dumpconf()
{
}

/* XXX: empty stub -- crash dumps are not implemented here. */
void
dumpsys()
{
}

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by kcall
 * to sigreturn routine below. After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
void
sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
{
	struct proc *p = curproc;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	int onstack;

	tf = p->p_md.md_regs;

	/* Do we need to jump onto the signal stack? */
	onstack =
	    (p->p_sigctx.ps_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	/* Allocate space for the signal handler context. */
	if (onstack)
		fp = (struct sigframe *)((caddr_t)p->p_sigctx.ps_sigstk.ss_sp +
		    p->p_sigctx.ps_sigstk.ss_size);
	else
		fp = (struct sigframe *)tf->tf_r15;
	fp--;	/* frame lives just below the chosen stack top */

	/* Build stack frame for signal trampoline. */
	frame.sf_signum = sig;
	frame.sf_code = code;
	frame.sf_scp = &fp->sf_sc;
	frame.sf_handler = catcher;

	/* Save register context. */
	frame.sf_sc.sc_ssr = tf->tf_ssr;
	frame.sf_sc.sc_spc = tf->tf_spc;
	frame.sf_sc.sc_pr = tf->tf_pr;
	frame.sf_sc.sc_r15 = tf->tf_r15;
	frame.sf_sc.sc_r14 = tf->tf_r14;
	frame.sf_sc.sc_r13 = tf->tf_r13;
	frame.sf_sc.sc_r12 = tf->tf_r12;
	frame.sf_sc.sc_r11 = tf->tf_r11;
	frame.sf_sc.sc_r10 = tf->tf_r10;
	frame.sf_sc.sc_r9 = tf->tf_r9;
	frame.sf_sc.sc_r8 = tf->tf_r8;
	frame.sf_sc.sc_r7 = tf->tf_r7;
	frame.sf_sc.sc_r6 = tf->tf_r6;
	frame.sf_sc.sc_r5 = tf->tf_r5;
	frame.sf_sc.sc_r4 = tf->tf_r4;
	frame.sf_sc.sc_r3 = tf->tf_r3;
	frame.sf_sc.sc_r2 = tf->tf_r2;
	frame.sf_sc.sc_r1 = tf->tf_r1;
	frame.sf_sc.sc_r0 = tf->tf_r0;
	frame.sf_sc.sc_expevt = tf->tf_expevt;

	/* Save signal stack. */
	frame.sf_sc.sc_onstack = p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK;

	/* Save signal mask. */
	frame.sf_sc.sc_mask = *mask;

	if (copyout(&frame, fp, sizeof(frame)) != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Build context to run handler in: resume at the signal
	 * trampoline with the stack pointing at the new sigframe.
	 */
	tf->tf_spc = (int)p->p_sigctx.ps_sigcode;
	tf->tf_r15 = (int)fp;

	/* Remember that we're now on the signal stack. */
	if (onstack)
		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
}

/*
 * System call to cleanup state after a signal
 * has been taken. Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
int
sys___sigreturn14(struct proc *p, void *v, register_t *retval)
{
	struct sys___sigreturn14_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct sigcontext *scp, context;
	struct trapframe *tf;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, sigcntxp);
	if (copyin((caddr_t)scp, &context, sizeof(*scp)) != 0)
		return (EFAULT);

	/* Restore signal context. */
	tf = p->p_md.md_regs;

	/*
	 * Check for security violations: reject any attempt to change
	 * the SR bits that user mode must not control (PSL_USERSTATIC).
	 */
	if (((context.sc_ssr ^ tf->tf_ssr) & PSL_USERSTATIC) != 0)
		return (EINVAL);

	tf->tf_ssr = context.sc_ssr;

	tf->tf_r0 = context.sc_r0;
	tf->tf_r1 = context.sc_r1;
	tf->tf_r2 = context.sc_r2;
	tf->tf_r3 = context.sc_r3;
	tf->tf_r4 = context.sc_r4;
	tf->tf_r5 = context.sc_r5;
	tf->tf_r6 = context.sc_r6;
	tf->tf_r7 = context.sc_r7;
	tf->tf_r8 = context.sc_r8;
	tf->tf_r9 = context.sc_r9;
	tf->tf_r10 = context.sc_r10;
	tf->tf_r11 = context.sc_r11;
	tf->tf_r12 = context.sc_r12;
	tf->tf_r13 = context.sc_r13;
	tf->tf_r14 = context.sc_r14;
	tf->tf_spc = context.sc_spc;
	tf->tf_r15 = context.sc_r15;
	tf->tf_pr = context.sc_pr;

	/* Restore signal stack. */
	if (context.sc_onstack & SS_ONSTACK)
		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK;
	/* Restore signal mask. */
	(void) sigprocmask1(p, SIG_SETMASK, &context.sc_mask, 0);

	return (EJUSTRETURN);
}

/*
 * Clear registers on exec
 */
void
setregs(struct proc *p, struct exec_package *pack, u_long stack)
{
	struct trapframe *tf;

	p->p_md.md_flags &= ~MDP_USEDFPU;

	tf = p->p_md.md_regs;

	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	tf->tf_r2 = 0;
	tf->tf_r3 = 0;
	/*
	 * NOTE(review): fuword() returns -1 on fault and that is not
	 * checked here; presumably exec has already validated the new
	 * stack -- confirm against the exec path.
	 */
	tf->tf_r4 = fuword((caddr_t)stack);	/* argc */
	tf->tf_r5 = stack + 4;			/* argv */
	tf->tf_r6 = stack + 4 * tf->tf_r4 + 8;	/* envp */
	tf->tf_r7 = 0;
	tf->tf_r8 = 0;
	tf->tf_r9 = (int)p->p_psstr;
	tf->tf_r10 = 0;
	tf->tf_r11 = 0;
	tf->tf_r12 = 0;
	tf->tf_r13 = 0;
	tf->tf_r14 = 0;
	tf->tf_spc = pack->ep_entry;
	tf->tf_ssr = PSL_USERSET;
	tf->tf_r15 = stack;
}

/*
 * Jump to reset vector.
 */
void
cpu_reset()
{

	_cpu_exception_suspend();
	_reg_write_4(SH_(EXPEVT), EXPEVT_RESET_MANUAL);

	/*
	 * Computed goto (GCC extension) into the uncached P2 segment;
	 * 0xa0000000 is physical address 0 seen through P2, where the
	 * reset vector code lives.
	 */
	goto *(u_int32_t *)0xa0000000;
	/* NOTREACHED */
	while (1)
		;
}