1 /* $NetBSD: sh3_machdep.c,v 1.112 2022/02/23 21:54:40 andvar Exp $ */
2
3 /*-
4 * Copyright (c) 1996, 1997, 1998, 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*-
34 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
35 * All rights reserved.
36 *
37 * This code is derived from software contributed to Berkeley by
38 * William Jolitz.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)machdep.c 7.4 (Berkeley) 6/3/91
65 */
66
67 #include <sys/cdefs.h>
68 __KERNEL_RCSID(0, "$NetBSD: sh3_machdep.c,v 1.112 2022/02/23 21:54:40 andvar Exp $");
69
70 #include "opt_ddb.h"
71 #include "opt_kgdb.h"
72 #include "opt_memsize.h"
73 #include "opt_kstack_debug.h"
74 #include "opt_ptrace.h"
75
76 #include <sys/param.h>
77 #include <sys/systm.h>
78
79 #include <sys/buf.h>
80 #include <sys/exec.h>
81 #include <sys/kernel.h>
82 #include <sys/malloc.h>
83 #include <sys/mount.h>
84 #include <sys/proc.h>
85 #include <sys/signalvar.h>
86 #include <sys/ras.h>
87 #include <sys/syscallargs.h>
88 #include <sys/ucontext.h>
89 #include <sys/cpu.h>
90 #include <sys/bus.h>
91
92 #ifdef KGDB
93 #include <sys/kgdb.h>
94 #ifndef KGDB_DEVNAME
95 #define KGDB_DEVNAME "nodev"
96 #endif
97 const char kgdb_devname[] = KGDB_DEVNAME;
98 #endif /* KGDB */
99
100 #include <uvm/uvm.h>
101
102 #include <sh3/cache.h>
103 #include <sh3/clock.h>
104 #include <sh3/exception.h>
105 #include <sh3/locore.h>
106 #include <sh3/mmu.h>
107 #include <sh3/pcb.h>
108 #include <sh3/intr.h>
109 #include <sh3/ubcreg.h>
110
111 /* Our exported CPU info; we can have only one. */
112 struct cpu_info cpu_info_store;
113 int cpu_arch;
114 int cpu_product;
115
116 struct vm_map *phys_map;
117
118 struct pcb *curpcb;
119
120 #if !defined(IOM_RAM_BEGIN)
121 #error "define IOM_RAM_BEGIN"
122 #elif (IOM_RAM_BEGIN & SH3_P1SEG_BASE) != 0
123 #error "IOM_RAM_BEGIN is physical address. not P1 address."
124 #endif
125
126 #define VBR (uint8_t *)SH3_PHYS_TO_P1SEG(IOM_RAM_BEGIN)
127 vaddr_t ram_start = SH3_PHYS_TO_P1SEG(IOM_RAM_BEGIN);
128 /* exception handler holder (sh3/sh3/exception_vector.S) */
129 extern char sh_vector_generic[], sh_vector_generic_end[];
130 extern char sh_vector_interrupt[], sh_vector_interrupt_end[];
131 #ifdef SH3
132 extern char sh3_vector_tlbmiss[], sh3_vector_tlbmiss_end[];
133 #endif
134 #ifdef SH4
135 extern char sh4_vector_tlbmiss[], sh4_vector_tlbmiss_end[];
136 #endif
137 /*
138 * These variables are needed by /sbin/savecore
139 */
140 uint32_t dumpmag = 0x8fca0101; /* magic number */
141 int dumpsize; /* pages */
142 long dumplo; /* blocks */
143
144
/*
 * sh_cpu_init(arch, product):
 *	Early CPU bring-up.  Records the CPU type, initializes cache/MMU
 *	ops and the clock/interrupt controller, installs the exception
 *	handlers at the architectural VBR offsets, points VBR at the
 *	bottom of RAM, and arms the UBC for single-step support.
 */
void
sh_cpu_init(int arch, int product)
{
	/* CPU type; consulted by the CPU_IS_SH3/CPU_IS_SH4 tests below. */
	cpu_arch = arch;
	cpu_product = product;

#if defined(SH3) && defined(SH4)
	/* Set register addresses (only needed when both CPUs are configured) */
	sh_devreg_init();
#endif
	/* Cache access ops. */
	sh_cache_init();

	/* MMU access ops. */
	sh_mmu_init();

	/* Hardclock, RTC initialize. */
	machine_clock_init();

	/* ICU initialize.  idepth == -1 means "not in interrupt context". */
	curcpu()->ci_idepth = -1;
	intc_init();

	/*
	 * Exception vector.  SH CPUs dispatch to fixed offsets from VBR:
	 * general exceptions at VBR+0x100, TLB miss at VBR+0x400, and
	 * interrupts at VBR+0x600.  Copy the handler stubs there.
	 */
	memcpy(VBR + 0x100, sh_vector_generic,
	    sh_vector_generic_end - sh_vector_generic);
#ifdef SH3
	if (CPU_IS_SH3)
		memcpy(VBR + 0x400, sh3_vector_tlbmiss,
		    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss);
#endif
#ifdef SH4
	if (CPU_IS_SH4)
		memcpy(VBR + 0x400, sh4_vector_tlbmiss,
		    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss);
#endif
	memcpy(VBR + 0x600, sh_vector_interrupt,
	    sh_vector_interrupt_end - sh_vector_interrupt);

	/* The handlers were written via the data path; sync I-cache. */
	if (!SH_HAS_UNIFIED_CACHE)
		sh_icache_sync_all();

	/* Point the CPU's vector base register at the freshly copied table. */
	__asm volatile("ldc %0, vbr" :: "r"(VBR));

	/* kernel stack setup: select the CPU-specific cpu_switch resume hook */
	__sh_switch_resume = CPU_IS_SH3 ? sh3_switch_resume : sh4_switch_resume;

	uvm_md_init();
	/* setup UBC channel A for single-stepping */
#if defined(PTRACE_HOOKS) || defined(DDB)
	_reg_write_2(SH_(BBRA), 0); /* disable channel A */
	_reg_write_2(SH_(BBRB), 0); /* disable channel B */

#ifdef SH3
	if (CPU_IS_SH3) {
		/* A: break after execution, ignore ASID */
		_reg_write_4(SH3_BRCR, (UBC_CTL_A_AFTER_INSN
		    | SH3_UBC_CTL_A_MASK_ASID));

		/* A: compare all address bits */
		_reg_write_4(SH3_BAMRA, 0x00000000);
	}
#endif /* SH3 */

#ifdef SH4
	if (CPU_IS_SH4) {
		/* A: break after execution */
		_reg_write_2(SH4_BRCR, UBC_CTL_A_AFTER_INSN);

		/* A: compare all address bits, ignore ASID */
		_reg_write_1(SH4_BAMRA, SH4_UBC_MASK_NONE | SH4_UBC_MASK_ASID);
	}
#endif /* SH4 */
#endif
}
221
222
/*
 * void sh_proc0_init(void):
 *	Setup proc0 u-area: steal USPACE bytes of boot memory for lwp0's
 *	pcb + kernel stack, and load the banked registers the exception
 *	entry code relies on (r6_bank = trapframe top, r7_bank = stack
 *	bottom).
 */
void
sh_proc0_init(void)
{
	struct switchframe *sf;
	vaddr_t u;

	/* Steal process0 u-area (boot-time allocation; never freed). */
	u = uvm_pageboot_alloc(USPACE);
	memset((void *)u, 0, USPACE);

	/* Setup uarea for lwp0 */
	uvm_lwp_setuarea(&lwp0, u);

	/*
	 * u-area map:
	 * |pcb| .... | .................. |
	 * | PAGE_SIZE | USPACE - PAGE_SIZE |
	 *        frame bot        stack bot
	 * current frame ... r6_bank
	 * stack bottom  ... r7_bank
	 * current stack ... r15
	 */
	curpcb = lwp_getpcb(&lwp0);
	lwp0.l_md.md_pcb = curpcb;

	sf = &curpcb->pcb_sf;

#ifdef KSTACK_DEBUG
	/* Poison the u-area with distinct patterns to detect overruns. */
	memset((char *)(u + sizeof(struct pcb)), 0x5a,
	    PAGE_SIZE - sizeof(struct pcb));
	memset((char *)(u + PAGE_SIZE), 0xa5, USPACE - PAGE_SIZE);
	memset(sf, 0xb4, sizeof(struct switchframe));
#endif /* KSTACK_DEBUG */

	/* Record the layout above in lwp0's switchframe ... */
	sf->sf_r6_bank = u + PAGE_SIZE;
	sf->sf_r7_bank = sf->sf_r15 = u + USPACE;
	/* ... and load it into the live banked registers right now. */
	__asm volatile("ldc %0, r6_bank" :: "r"(sf->sf_r6_bank));
	__asm volatile("ldc %0, r7_bank" :: "r"(sf->sf_r7_bank));

	/* lwp0's trapframe sits immediately below the frame bottom. */
	lwp0.l_md.md_regs = (struct trapframe *)sf->sf_r6_bank - 1;
}
268
/*
 * sh_startup():
 *	Machine-independent-style startup banner and VM finalization:
 *	print copyright/model/memory sizes and create the physio submap.
 */
void
sh_startup(void)
{
	vaddr_t minaddr, maxaddr;
	char pbuf[9];		/* "99999 MB" + NUL; see format_bytes(9) */
	const char *model = cpu_getmodel();

	printf("%s%s", copyright, version);
	if (*model != '\0')
		printf("%s\n", model);
#ifdef DEBUG
	/* Report the sizes of the handler stubs copied in sh_cpu_init(). */
	printf("general exception handler:\t%d byte\n",
	    sh_vector_generic_end - sh_vector_generic);
	printf("TLB miss exception handler:\t%d byte\n",
#if defined(SH3) && defined(SH4)
	    CPU_IS_SH3 ? sh3_vector_tlbmiss_end - sh3_vector_tlbmiss :
	    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss
#elif defined(SH3)
	    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss
#elif defined(SH4)
	    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss
#endif
	    );
	printf("interrupt exception handler:\t%d byte\n",
	    sh_vector_interrupt_end - sh_vector_interrupt);
#endif /* DEBUG */

	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
	printf("avail memory = %s\n", pbuf);
}
310
/*
 * This is called by main to set dumplo and dumpsize.
 * Dumps always skip the first CLBYTES of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 *
 * NOTE: crash dump support is not implemented on this port; this is
 * an empty stub and dumplo/dumpsize are left at their initial values.
 */
void
cpu_dumpconf(void)
{
}
322
/*
 * dumpsys():
 *	Write a crash dump to the dump device.  Not implemented on this
 *	port; intentionally an empty stub.
 */
void
dumpsys(void)
{
}
327
328 /*
329 * Get the base address of the signal frame either on the lwp's stack
330 * or on the signal stack and set *onstack accordingly. Caller then
331 * just subtracts the size of appropriate struct sigframe_foo.
332 */
333 void *
getframe(const struct lwp * l,int sig,int * onstack)334 getframe(const struct lwp *l, int sig, int *onstack)
335 {
336 const struct proc *p = l->l_proc;
337 const stack_t *sigstk = &l->l_sigstk;
338
339 /* Do we need to jump onto the signal stack? */
340 *onstack = (sigstk->ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0
341 && (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;
342
343 if (*onstack)
344 return ((char *)sigstk->ss_sp + sigstk->ss_size);
345 else
346 return ((void *)l->l_md.md_regs->tf_r15);
347 }
348
/*
 * sendsig_siginfo(ksi, mask):
 *	Deliver a signal with siginfo to the current lwp: build a
 *	sigframe_siginfo on the chosen stack, copy it out to user space,
 *	then redirect the trapframe so the lwp resumes in the signal
 *	trampoline, which calls the handler.
 */
void
sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;
	struct trapframe *tf = l->l_md.md_regs;
	int sig = ksi->ksi_signo, error;
	sig_t catcher = SIGACTION(p, sig).sa_handler;
	struct sigframe_siginfo *fp, frame;
	int onstack;

	/* Allocate space for the frame just below the stack base. */
	fp = getframe(l, sig, &onstack);
	--fp;

	memset(&frame, 0, sizeof(frame));
	frame.sf_si._info = ksi->ksi_info;
	frame.sf_uc.uc_link = l->l_ctxlink;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_flags = _UC_SIGMASK;
	frame.sf_uc.uc_flags |= (l->l_sigstk.ss_flags & SS_ONSTACK)
	    ? _UC_SETSTACK : _UC_CLRSTACK;
	sendsig_reset(l, sig);
	/* Drop p_lock across copyout: it may fault and sleep. */
	mutex_exit(p->p_lock);
	cpu_getmcontext(l, &frame.sf_uc.uc_mcontext, &frame.sf_uc.uc_flags);
	error = copyout(&frame, fp, sizeof(frame));
	mutex_enter(p->p_lock);

	if (error != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	/* Arrange to resume in the trampoline with the handler's args. */
	tf->tf_r4 = sig;		/* "signum" argument for handler */
	tf->tf_r5 = (int)&fp->sf_si;	/* "sip" argument for handler */
	tf->tf_r6 = (int)&fp->sf_uc;	/* "ucp" argument for handler */
	tf->tf_spc = (int)catcher;
	tf->tf_r15 = (int)fp;
	tf->tf_pr = (int)ps->sa_sigdesc[sig].sd_tramp;

	/* Remember if we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
}
397
398 void
cpu_getmcontext(struct lwp * l,mcontext_t * mcp,unsigned int * flags)399 cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
400 {
401 const struct trapframe *tf = l->l_md.md_regs;
402 __greg_t *gr = mcp->__gregs;
403 __greg_t ras_pc;
404
405 /* Save register context. */
406 gr[_REG_GBR] = tf->tf_gbr;
407 gr[_REG_PC] = tf->tf_spc;
408 gr[_REG_SR] = tf->tf_ssr;
409 gr[_REG_MACL] = tf->tf_macl;
410 gr[_REG_MACH] = tf->tf_mach;
411 gr[_REG_PR] = tf->tf_pr;
412 gr[_REG_R14] = tf->tf_r14;
413 gr[_REG_R13] = tf->tf_r13;
414 gr[_REG_R12] = tf->tf_r12;
415 gr[_REG_R11] = tf->tf_r11;
416 gr[_REG_R10] = tf->tf_r10;
417 gr[_REG_R9] = tf->tf_r9;
418 gr[_REG_R8] = tf->tf_r8;
419 gr[_REG_R7] = tf->tf_r7;
420 gr[_REG_R6] = tf->tf_r6;
421 gr[_REG_R5] = tf->tf_r5;
422 gr[_REG_R4] = tf->tf_r4;
423 gr[_REG_R3] = tf->tf_r3;
424 gr[_REG_R2] = tf->tf_r2;
425 gr[_REG_R1] = tf->tf_r1;
426 gr[_REG_R0] = tf->tf_r0;
427 gr[_REG_R15] = tf->tf_r15;
428
429 if ((ras_pc = (__greg_t)ras_lookup(l->l_proc,
430 (void *) gr[_REG_PC])) != -1)
431 gr[_REG_PC] = ras_pc;
432
433 *flags |= (_UC_CPU|_UC_TLSBASE);
434
435 /* FPU context is currently not handled by the kernel. */
436 memset(&mcp->__fpregs, 0, sizeof (mcp->__fpregs));
437 }
438
439 int
cpu_mcontext_validate(struct lwp * l,const mcontext_t * mcp)440 cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
441 {
442 struct trapframe *tf = l->l_md.md_regs;
443 const __greg_t *gr = mcp->__gregs;
444
445 if (((tf->tf_ssr ^ gr[_REG_SR]) & PSL_USERSTATIC) != 0)
446 return EINVAL;
447
448 return 0;
449 }
450
451 int
cpu_setmcontext(struct lwp * l,const mcontext_t * mcp,unsigned int flags)452 cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
453 {
454 struct trapframe *tf = l->l_md.md_regs;
455 const __greg_t *gr = mcp->__gregs;
456 struct proc *p = l->l_proc;
457 int error;
458
459 /* Restore register context, if any. */
460 if ((flags & _UC_CPU) != 0) {
461 /* Check for security violations. */
462 error = cpu_mcontext_validate(l, mcp);
463 if (error)
464 return error;
465
466 /* done in lwp_setprivate */
467 /* tf->tf_gbr = gr[_REG_GBR]; */
468 tf->tf_spc = gr[_REG_PC];
469 tf->tf_ssr = gr[_REG_SR];
470 tf->tf_macl = gr[_REG_MACL];
471 tf->tf_mach = gr[_REG_MACH];
472 tf->tf_pr = gr[_REG_PR];
473 tf->tf_r14 = gr[_REG_R14];
474 tf->tf_r13 = gr[_REG_R13];
475 tf->tf_r12 = gr[_REG_R12];
476 tf->tf_r11 = gr[_REG_R11];
477 tf->tf_r10 = gr[_REG_R10];
478 tf->tf_r9 = gr[_REG_R9];
479 tf->tf_r8 = gr[_REG_R8];
480 tf->tf_r7 = gr[_REG_R7];
481 tf->tf_r6 = gr[_REG_R6];
482 tf->tf_r5 = gr[_REG_R5];
483 tf->tf_r4 = gr[_REG_R4];
484 tf->tf_r3 = gr[_REG_R3];
485 tf->tf_r2 = gr[_REG_R2];
486 tf->tf_r1 = gr[_REG_R1];
487 tf->tf_r0 = gr[_REG_R0];
488 tf->tf_r15 = gr[_REG_R15];
489
490 if (flags & _UC_TLSBASE)
491 lwp_setprivate(l, (void *)(uintptr_t)gr[_REG_GBR]);
492 }
493
494 #if 0
495 /* XXX: FPU context is currently not handled by the kernel. */
496 if (flags & _UC_FPU) {
497 /* TODO */;
498 }
499 #endif
500
501 mutex_enter(p->p_lock);
502 if (flags & _UC_SETSTACK)
503 l->l_sigstk.ss_flags |= SS_ONSTACK;
504 if (flags & _UC_CLRSTACK)
505 l->l_sigstk.ss_flags &= ~SS_ONSTACK;
506 mutex_exit(p->p_lock);
507
508 return (0);
509 }
510
511 /*
512 * Clear registers on exec
513 */
514 void
setregs(struct lwp * l,struct exec_package * pack,vaddr_t stack)515 setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
516 {
517 struct trapframe *tf;
518
519 l->l_md.md_flags &= ~(MDL_USEDFPU | MDL_SSTEP);
520
521 tf = l->l_md.md_regs;
522
523 tf->tf_ssr = PSL_USERSET;
524 tf->tf_spc = pack->ep_entry;
525 tf->tf_pr = 0;
526
527 tf->tf_gbr = 0;
528 tf->tf_macl = 0;
529 tf->tf_mach = 0;
530
531 tf->tf_r0 = 0;
532 tf->tf_r1 = 0;
533 tf->tf_r2 = 0;
534 tf->tf_r3 = 0;
535 if (ufetch_int((void *)stack, (u_int *)&tf->tf_r4) != 0) /* argc */
536 tf->tf_r4 = -1;
537 tf->tf_r5 = stack + 4; /* argv */
538 tf->tf_r6 = stack + 4 * tf->tf_r4 + 8; /* envp */
539 tf->tf_r7 = 0;
540 tf->tf_r8 = 0;
541 tf->tf_r9 = l->l_proc->p_psstrp;
542 tf->tf_r10 = 0;
543 tf->tf_r11 = 0;
544 tf->tf_r12 = 0;
545 tf->tf_r13 = 0;
546 tf->tf_r14 = 0;
547 tf->tf_r15 = stack;
548 }
549
/*
 * Jump to reset vector.
 */
void
cpu_reset(void)
{

	/* Quiesce exception handling, then fake a manual-reset event. */
	_cpu_exception_suspend();
	_reg_write_4(SH_(EXPEVT), EXPEVT_RESET_MANUAL);

#ifndef __lint__
	/*
	 * Computed goto (GCC extension) to 0xa0000000: the reset vector
	 * through the uncached P2 segment.  Hidden from lint, which does
	 * not understand the construct.
	 */
	goto *(void *)0xa0000000;
#endif
	/* NOTREACHED */
}
565
566 int
cpu_lwp_setprivate(lwp_t * l,void * addr)567 cpu_lwp_setprivate(lwp_t *l, void *addr)
568 {
569
570 l->l_md.md_regs->tf_gbr = (int)addr;
571 return 0;
572 }
573
574