xref: /netbsd/sys/arch/sh3/sh3/sh3_machdep.c (revision 6550d01e)
1 /*	$NetBSD: sh3_machdep.c,v 1.90 2011/01/28 21:06:08 uwe Exp $	*/
2 
3 /*-
4  * Copyright (c) 1996, 1997, 1998, 2002 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
9  * Simulation Facility, NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*-
34  * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
35  * All rights reserved.
36  *
37  * This code is derived from software contributed to Berkeley by
38  * William Jolitz.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. Neither the name of the University nor the names of its contributors
49  *    may be used to endorse or promote products derived from this software
50  *    without specific prior written permission.
51  *
52  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62  * SUCH DAMAGE.
63  *
64  *	@(#)machdep.c	7.4 (Berkeley) 6/3/91
65  */
66 
67 #include <sys/cdefs.h>
68 __KERNEL_RCSID(0, "$NetBSD: sh3_machdep.c,v 1.90 2011/01/28 21:06:08 uwe Exp $");
69 
70 #include "opt_ddb.h"
71 #include "opt_kgdb.h"
72 #include "opt_memsize.h"
73 #include "opt_kstack_debug.h"
74 #include "opt_ptrace.h"
75 
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 
79 #include <sys/buf.h>
80 #include <sys/exec.h>
81 #include <sys/kernel.h>
82 #include <sys/malloc.h>
83 #include <sys/mount.h>
84 #include <sys/proc.h>
85 #include <sys/signalvar.h>
86 #include <sys/ras.h>
87 #include <sys/sa.h>
88 #include <sys/savar.h>
89 #include <sys/syscallargs.h>
90 #include <sys/ucontext.h>
91 
92 #ifdef KGDB
93 #include <sys/kgdb.h>
94 #ifndef KGDB_DEVNAME
95 #define	KGDB_DEVNAME "nodev"
96 #endif
97 const char kgdb_devname[] = KGDB_DEVNAME;
98 #endif /* KGDB */
99 
100 #include <uvm/uvm.h>
101 
102 #include <sh3/cache.h>
103 #include <sh3/clock.h>
104 #include <sh3/exception.h>
105 #include <sh3/locore.h>
106 #include <sh3/mmu.h>
107 #include <sh3/pcb.h>
108 #include <sh3/intr.h>
109 #include <sh3/ubcreg.h>
110 
/* Our exported CPU info; we can have only one. */
struct cpu_info cpu_info_store;
int cpu_arch;			/* CPU architecture type (see sh_cpu_init) */
int cpu_product;		/* CPU product id */
char cpu_model[120];		/* printable CPU model string (sh_startup) */

struct vm_map *phys_map;	/* submap for physio, set up in sh_startup() */

struct pcb *curpcb;		/* PCB of the currently running lwp */

#if !defined(IOM_RAM_BEGIN)
#error "define IOM_RAM_BEGIN"
#elif (IOM_RAM_BEGIN & SH3_P1SEG_BASE) != 0
#error "IOM_RAM_BEGIN is physical address. not P1 address."
#endif

/* Vector base: exception handlers are copied to the start of RAM (P1 view). */
#define	VBR	(uint8_t *)SH3_PHYS_TO_P1SEG(IOM_RAM_BEGIN)
vaddr_t ram_start = SH3_PHYS_TO_P1SEG(IOM_RAM_BEGIN);
/* exception handler holder (sh3/sh3/exception_vector.S) */
extern char sh_vector_generic[], sh_vector_generic_end[];
extern char sh_vector_interrupt[], sh_vector_interrupt_end[];
#ifdef SH3
extern char sh3_vector_tlbmiss[], sh3_vector_tlbmiss_end[];
#endif
#ifdef SH4
extern char sh4_vector_tlbmiss[], sh4_vector_tlbmiss_end[];
#endif
/*
 * These variables are needed by /sbin/savecore
 */
uint32_t dumpmag = 0x8fca0101;	/* magic number */
int dumpsize;			/* pages */
long dumplo;	 		/* blocks */
145 
/*
 * void sh_cpu_init(int arch, int product):
 *	Early CPU initialization: device registers, caches, MMU, clock,
 *	interrupt controller, the exception vector table, and the UBC
 *	(user break controller) channel used for single-stepping.
 */
void
sh_cpu_init(int arch, int product)
{
	/* CPU type */
	cpu_arch = arch;
	cpu_product = product;

#if defined(SH3) && defined(SH4)
	/* Set register addresses (kernel built for both CPU families). */
	sh_devreg_init();
#endif
	/* Cache access ops. */
	sh_cache_init();

	/* MMU access ops. */
	sh_mmu_init();

	/* Hardclock, RTC initialize. */
	machine_clock_init();

	/* ICU initialize. */
	curcpu()->ci_idepth = -1;
	intc_init();

	/*
	 * Copy exception handlers into the vector table:
	 *   VBR+0x100 - general exception
	 *   VBR+0x400 - TLB miss exception (CPU-specific handler)
	 *   VBR+0x600 - interrupt
	 */
	memcpy(VBR + 0x100, sh_vector_generic,
	    sh_vector_generic_end - sh_vector_generic);
#ifdef SH3
	if (CPU_IS_SH3)
		memcpy(VBR + 0x400, sh3_vector_tlbmiss,
		    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss);
#endif
#ifdef SH4
	if (CPU_IS_SH4)
		memcpy(VBR + 0x400, sh4_vector_tlbmiss,
		    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss);
#endif
	memcpy(VBR + 0x600, sh_vector_interrupt,
	    sh_vector_interrupt_end - sh_vector_interrupt);

	/*
	 * The handlers were written through the data cache; make sure
	 * the instruction cache sees them before the CPU can vector there.
	 */
	if (!SH_HAS_UNIFIED_CACHE)
		sh_icache_sync_all();

	/* Point the CPU's vector base register at the new table. */
	__asm volatile("ldc %0, vbr" :: "r"(VBR));

	/* kernel stack setup */
	__sh_switch_resume = CPU_IS_SH3 ? sh3_switch_resume : sh4_switch_resume;

	/* Set page size (4KB) */
	uvm_setpagesize();

	/* setup UBC channel A for single-stepping */
#if defined(PTRACE) || defined(DDB)
	_reg_write_2(SH_(BBRA), 0); /* disable channel A */
	_reg_write_2(SH_(BBRB), 0); /* disable channel B */

#ifdef SH3
	if (CPU_IS_SH3) {
		/* A: break after execution, ignore ASID */
		_reg_write_4(SH3_BRCR, (UBC_CTL_A_AFTER_INSN
					| SH3_UBC_CTL_A_MASK_ASID));

		/* A: compare all address bits */
		_reg_write_4(SH3_BAMRA, 0x00000000);
	}
#endif	/* SH3 */

#ifdef SH4
	if (CPU_IS_SH4) {
		/* A: break after execution */
		_reg_write_2(SH4_BRCR, UBC_CTL_A_AFTER_INSN);

		/* A: compare all address bits, ignore ASID */
		_reg_write_1(SH4_BAMRA, SH4_UBC_MASK_NONE | SH4_UBC_MASK_ASID);
	}
#endif	/* SH4 */
#endif
}
224 
225 
226 /*
227  * void sh_proc0_init(void):
228  *	Setup proc0 u-area.
229  */
230 void
231 sh_proc0_init()
232 {
233 	struct switchframe *sf;
234 	vaddr_t u;
235 
236 	/* Steal process0 u-area */
237 	u = uvm_pageboot_alloc(USPACE);
238 	memset((void *)u, 0, USPACE);
239 
240 	/* Setup uarea for lwp0 */
241 	uvm_lwp_setuarea(&lwp0, u);
242 
243 	/*
244 	 * u-area map:
245 	 * |pcb| .... | .................. |
246 	 * | PAGE_SIZE | USPACE - PAGE_SIZE |
247          *        frame bot        stack bot
248 	 * current frame ... r6_bank
249 	 * stack bottom  ... r7_bank
250 	 * current stack ... r15
251 	 */
252 	curpcb = lwp_getpcb(&lwp0);
253 	lwp0.l_md.md_pcb = curpcb;
254 
255 	sf = &curpcb->pcb_sf;
256 
257 #ifdef KSTACK_DEBUG
258 	memset((char *)(u + sizeof(struct pcb)), 0x5a,
259 	    PAGE_SIZE - sizeof(struct pcb));
260 	memset((char *)(u + PAGE_SIZE), 0xa5, USPACE - PAGE_SIZE);
261 	memset(sf, 0xb4, sizeof(struct switchframe));
262 #endif /* KSTACK_DEBUG */
263 
264 	sf->sf_r6_bank = u + PAGE_SIZE;
265 	sf->sf_r7_bank = sf->sf_r15 = u + USPACE;
266 	__asm volatile("ldc %0, r6_bank" :: "r"(sf->sf_r6_bank));
267 	__asm volatile("ldc %0, r7_bank" :: "r"(sf->sf_r7_bank));
268 
269 	lwp0.l_md.md_regs = (struct trapframe *)sf->sf_r6_bank - 1;
270 }
271 
272 void
273 sh_startup(void)
274 {
275 	vaddr_t minaddr, maxaddr;
276 	char pbuf[9];
277 
278 	printf("%s%s", copyright, version);
279 	if (*cpu_model != '\0')
280 		printf("%s", cpu_model);
281 #ifdef DEBUG
282 	printf("general exception handler:\t%d byte\n",
283 	    sh_vector_generic_end - sh_vector_generic);
284 	printf("TLB miss exception handler:\t%d byte\n",
285 #if defined(SH3) && defined(SH4)
286 	    CPU_IS_SH3 ? sh3_vector_tlbmiss_end - sh3_vector_tlbmiss :
287 	    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss
288 #elif defined(SH3)
289 	    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss
290 #elif defined(SH4)
291 	    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss
292 #endif
293 	    );
294 	printf("interrupt exception handler:\t%d byte\n",
295 	    sh_vector_interrupt_end - sh_vector_interrupt);
296 #endif /* DEBUG */
297 
298 	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
299 	printf("total memory = %s\n", pbuf);
300 
301 	minaddr = 0;
302 
303 	/*
304 	 * Allocate a submap for physio
305 	 */
306 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
307 	    VM_PHYS_SIZE, 0, false, NULL);
308 
309 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
310 	printf("avail memory = %s\n", pbuf);
311 }
312 
/*
 * This is called by main to set dumplo and dumpsize.
 * Dumps always skip the first CLBYTES of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */
void
cpu_dumpconf(void)
{
	/* XXX: empty stub -- crash dump configuration is not implemented. */
}
324 
void
dumpsys(void)
{
	/* XXX: empty stub -- crash dumps are not implemented; do nothing. */
}
329 
/*
 * void cpu_upcall(struct lwp *l, int type, int nevents, int ninterrupted,
 *     void *sas, void *ap, void *sp, sa_upcall_t upcall):
 *
 * Send a scheduler-activations upcall to userland: place a minimal
 * saframe on the user stack at sp and rewrite the lwp's trapframe so
 * it resumes in the upcall handler.
 */
void
cpu_upcall(struct lwp *l, int type, int nevents, int ninterrupted, void *sas,
    void *ap, void *sp, sa_upcall_t upcall)
{
	struct trapframe *tf;
	struct saframe *sf, frame;

	tf = l->l_md.md_regs;

	/* Build the stack frame. */
#if 0 /* First 4 args in regs (see below). */
	frame.sa_type = type;
	frame.sa_sas = sas;
	frame.sa_events = nevents;
	frame.sa_interrupted = ninterrupted;
#endif
	frame.sa_arg = ap;

	/* Carve the frame just below sp and copy it out to user space. */
	sf = (struct saframe *)sp - 1;
	if (copyout(&frame, sf, sizeof(frame)) != 0) {
		/* Copying onto the stack didn't work.  Die. */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	/* First four arguments are passed in registers r4-r7. */
	tf->tf_r4 = type;
	tf->tf_r5 = (int) sas;
	tf->tf_r6 = nevents;
	tf->tf_r7 = ninterrupted;

	/* Resume at the upcall handler; upcalls never return. */
	tf->tf_spc = (int) upcall;
	tf->tf_pr = 0;		/* no return */
	tf->tf_r15 = (int) sf;
}
370 
371 /*
372  * Get the base address of the signal frame either on the lwp's stack
373  * or on the signal stack and set *onstack accordingly.  Caller then
374  * just subtracts the size of appropriate struct sigframe_foo.
375  */
376 void *
377 getframe(const struct lwp *l, int sig, int *onstack)
378 {
379 	const struct proc *p = l->l_proc;
380 	const struct sigaltstack *sigstk= &l->l_sigstk;
381 
382 	/* Do we need to jump onto the signal stack? */
383 	*onstack = (sigstk->ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0
384 		&& (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;
385 
386 	if (*onstack)
387 		return ((char *)sigstk->ss_sp + sigstk->ss_size);
388 	else
389 		return ((void *)l->l_md.md_regs->tf_r15);
390 }
391 
/*
 * Deliver a signal with siginfo to the current lwp: build a
 * sigframe_siginfo (siginfo + ucontext) on the chosen stack and
 * redirect the trapframe so the process resumes in the handler via
 * the signal trampoline.  Called with p->p_lock held (it is dropped
 * around the copyout and reacquired -- see below).
 */
void
sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;
	struct trapframe *tf = l->l_md.md_regs;
	int sig = ksi->ksi_signo, error;
	sig_t catcher = SIGACTION(p, sig).sa_handler;
	struct sigframe_siginfo *fp, frame;
	int onstack;

	/* Pick the stack (normal or sigaltstack) and carve out a frame. */
	fp = getframe(l, sig, &onstack);
	--fp;

	/* Fill in the frame: siginfo plus the interrupted ucontext. */
	frame.sf_si._info = ksi->ksi_info;
	frame.sf_uc.uc_link = l->l_ctxlink;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_flags = _UC_SIGMASK;
	frame.sf_uc.uc_flags |= (l->l_sigstk.ss_flags & SS_ONSTACK)
		? _UC_SETSTACK : _UC_CLRSTACK;
	memset(&frame.sf_uc.uc_stack, 0, sizeof(frame.sf_uc.uc_stack));
	sendsig_reset(l, sig);

	/* Drop p_lock across copyout(); writing user memory may fault. */
	mutex_exit(p->p_lock);
	cpu_getmcontext(l, &frame.sf_uc.uc_mcontext, &frame.sf_uc.uc_flags);
	error = copyout(&frame, fp, sizeof(frame));
	mutex_enter(p->p_lock);

	if (error != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	tf->tf_r4 = sig;		/* "signum" argument for handler */
	tf->tf_r5 = (int)&fp->sf_si;	/* "sip" argument for handler */
	tf->tf_r6 = (int)&fp->sf_uc;	/* "ucp" argument for handler */
	tf->tf_spc = (int)catcher;
	tf->tf_r15 = (int)fp;
	tf->tf_pr = (int)ps->sa_sigdesc[sig].sd_tramp;

	/* Remember if we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
}
440 
441 void
442 cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
443 {
444 	const struct trapframe *tf = l->l_md.md_regs;
445 	__greg_t *gr = mcp->__gregs;
446 	__greg_t ras_pc;
447 
448 	/* Save register context. */
449 	gr[_REG_GBR]    = tf->tf_gbr;
450 	gr[_REG_PC]     = tf->tf_spc;
451 	gr[_REG_SR]     = tf->tf_ssr;
452 	gr[_REG_MACL]   = tf->tf_macl;
453 	gr[_REG_MACH]   = tf->tf_mach;
454 	gr[_REG_PR]     = tf->tf_pr;
455 	gr[_REG_R14]    = tf->tf_r14;
456 	gr[_REG_R13]    = tf->tf_r13;
457 	gr[_REG_R12]    = tf->tf_r12;
458 	gr[_REG_R11]    = tf->tf_r11;
459 	gr[_REG_R10]    = tf->tf_r10;
460 	gr[_REG_R9]     = tf->tf_r9;
461 	gr[_REG_R8]     = tf->tf_r8;
462 	gr[_REG_R7]     = tf->tf_r7;
463 	gr[_REG_R6]     = tf->tf_r6;
464 	gr[_REG_R5]     = tf->tf_r5;
465 	gr[_REG_R4]     = tf->tf_r4;
466 	gr[_REG_R3]     = tf->tf_r3;
467 	gr[_REG_R2]     = tf->tf_r2;
468 	gr[_REG_R1]     = tf->tf_r1;
469 	gr[_REG_R0]     = tf->tf_r0;
470 	gr[_REG_R15]    = tf->tf_r15;
471 
472 	if ((ras_pc = (__greg_t)ras_lookup(l->l_proc,
473 	    (void *) gr[_REG_PC])) != -1)
474 		gr[_REG_PC] = ras_pc;
475 
476 	*flags |= _UC_CPU;
477 
478 	/* FPU context is currently not handled by the kernel. */
479 	memset(&mcp->__fpregs, 0, sizeof (mcp->__fpregs));
480 }
481 
482 int
483 cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
484 {
485 	struct trapframe *tf = l->l_md.md_regs;
486 	const __greg_t *gr = mcp->__gregs;
487 	struct proc *p = l->l_proc;
488 
489 	/* Restore register context, if any. */
490 	if ((flags & _UC_CPU) != 0) {
491 		/* Check for security violations. */
492 		if (((tf->tf_ssr ^ gr[_REG_SR]) & PSL_USERSTATIC) != 0)
493 			return (EINVAL);
494 
495 		tf->tf_gbr    = gr[_REG_GBR];
496 		tf->tf_spc    = gr[_REG_PC];
497 		tf->tf_ssr    = gr[_REG_SR];
498 		tf->tf_macl   = gr[_REG_MACL];
499 		tf->tf_mach   = gr[_REG_MACH];
500 		tf->tf_pr     = gr[_REG_PR];
501 		tf->tf_r14    = gr[_REG_R14];
502 		tf->tf_r13    = gr[_REG_R13];
503 		tf->tf_r12    = gr[_REG_R12];
504 		tf->tf_r11    = gr[_REG_R11];
505 		tf->tf_r10    = gr[_REG_R10];
506 		tf->tf_r9     = gr[_REG_R9];
507 		tf->tf_r8     = gr[_REG_R8];
508 		tf->tf_r7     = gr[_REG_R7];
509 		tf->tf_r6     = gr[_REG_R6];
510 		tf->tf_r5     = gr[_REG_R5];
511 		tf->tf_r4     = gr[_REG_R4];
512 		tf->tf_r3     = gr[_REG_R3];
513 		tf->tf_r2     = gr[_REG_R2];
514 		tf->tf_r1     = gr[_REG_R1];
515 		tf->tf_r0     = gr[_REG_R0];
516 		tf->tf_r15    = gr[_REG_R15];
517 	}
518 
519 #if 0
520 	/* XXX: FPU context is currently not handled by the kernel. */
521 	if (flags & _UC_FPU) {
522 		/* TODO */;
523 	}
524 #endif
525 
526 	mutex_enter(p->p_lock);
527 	if (flags & _UC_SETSTACK)
528 		l->l_sigstk.ss_flags |= SS_ONSTACK;
529 	if (flags & _UC_CLRSTACK)
530 		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
531 	mutex_exit(p->p_lock);
532 
533 	return (0);
534 }
535 
/*
 * Clear registers on exec: reset the lwp's trapframe so the new
 * image starts at its entry point with the SH userland ABI register
 * contents (argc/argv/envp in r4-r6, ps_strings in r9, stack in r15).
 */
void
setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct trapframe *tf;

	/* Forget FPU usage and any pending single-step request. */
	l->l_md.md_flags &= ~(MDP_USEDFPU | MDP_SSTEP);

	tf = l->l_md.md_regs;

	tf->tf_ssr = PSL_USERSET;	/* user-mode status register */
	tf->tf_spc = pack->ep_entry;	/* resume at the image entry point */
	tf->tf_pr = 0;

	tf->tf_gbr = 0;
	tf->tf_macl = 0;
	tf->tf_mach = 0;

	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	tf->tf_r2 = 0;
	tf->tf_r3 = 0;
	/*
	 * NOTE(review): the fuword() result is unchecked; on fault it
	 * returns -1, which would flow into r4 (argc) and the envp
	 * computation below -- confirm exec guarantees a readable stack.
	 */
	tf->tf_r4 = fuword((void *)stack);	/* argc */
	tf->tf_r5 = stack + 4;			/* argv */
	tf->tf_r6 = stack + 4 * tf->tf_r4 + 8;	/* envp */
	tf->tf_r7 = 0;
	tf->tf_r8 = 0;
	tf->tf_r9 = (int)l->l_proc->p_psstr;	/* ps_strings address */
	tf->tf_r10 = 0;
	tf->tf_r11 = 0;
	tf->tf_r12 = 0;
	tf->tf_r13 = 0;
	tf->tf_r14 = 0;
	tf->tf_r15 = stack;			/* user stack pointer */
}
573 
/*
 * Jump to reset vector: soft-reset the machine by disabling
 * exceptions, recording a manual-reset cause in EXPEVT, and jumping
 * to 0xa0000000 (presumably the P2/uncached alias of the CPU reset
 * vector -- the computed goto never returns).
 */
void
cpu_reset(void)
{

	_cpu_exception_suspend();
	_reg_write_4(SH_(EXPEVT), EXPEVT_RESET_MANUAL);

#ifndef __lint__
	goto *(void *)0xa0000000;
#endif
	/* NOTREACHED */
}
589